diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml new file mode 100644 index 000000000000..bba3dcd3ada2 --- /dev/null +++ b/.azure-pipelines/bazel.yml @@ -0,0 +1,57 @@ +parameters: + - name: ciTarget + displayName: "CI target" + type: string + default: bazel.release + +steps: + - task: CacheBeta@1 + inputs: + key: '"${{ parameters.ciTarget }}" | ./WORKSPACE | **/*.bzl' + path: $(Build.StagingDirectory)/repository_cache + + - bash: | + echo "disk space at beginning of build:" + df -h + displayName: "Check disk space at beginning" + + - bash: | + sudo mkdir -p /etc/docker + echo '{ + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64" + }' | sudo tee /etc/docker/daemon.json + sudo service docker restart + displayName: "Enable IPv6" + + - script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + ENVOY_RBE: "true" + # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks + # to save disk space. 
+ BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run CI script" + + - bash: | + echo "disk space at end of build:" + df -h + displayName: "Check disk space at end" + condition: always() + + - task: PublishTestResults@2 + inputs: + testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml" + testRunTitle: "${{ parameters.ciTarget }}" + searchFolder: $(Build.StagingDirectory)/tmp + condition: always() + + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/envoy" + artifactName: ${{ parameters.ciTarget }} + condition: always() diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml deleted file mode 100644 index 06b309c9d206..000000000000 --- a/.azure-pipelines/linux.yml +++ /dev/null @@ -1,146 +0,0 @@ -trigger: - branches: - include: - - 'master' - tags: - include: - - 'v*' - -# PR build config is manually overridden in Azure pipelines UI with different secrets -pr: none - -jobs: -- job: format - dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
- pool: - vmImage: 'Ubuntu 16.04' - steps: - - task: CacheBeta@1 - inputs: - key: 'format | ./WORKSPACE | **/*.bzl' - path: $(Build.StagingDirectory)/repository_cache - - - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Run check format scripts" - - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" - artifactName: format - condition: failed() - -- job: bazel - dependsOn: ["format"] - # For master builds, continue even if format fails - condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - strategy: - maxParallel: 3 - matrix: - gcc: - CI_TARGET: 'bazel.gcc' - clang_tidy: - CI_TARGET: 'bazel.clang_tidy' - asan: - CI_TARGET: 'bazel.asan' - tsan: - CI_TARGET: 'bazel.tsan' - compile_time_options: - CI_TARGET: 'bazel.compile_time_options' - release: - CI_TARGET: 'bazel.release' - timeoutInMinutes: 360 - pool: - vmImage: 'Ubuntu 16.04' - steps: - - task: CacheBeta@1 - inputs: - key: '"$(CI_TARGET)" | ./WORKSPACE | **/*.bzl' - path: $(Build.StagingDirectory)/repository_cache - - - bash: | - echo "disk space at beginning of build:" - df -h - displayName: "Check disk space at beginning" - - - bash: | - sudo mkdir -p /etc/docker - echo '{ - "ipv6": true, - "fixed-cidr-v6": "2001:db8:1::/64" - }' | sudo tee /etc/docker/daemon.json - sudo service docker restart - displayName: "Enable IPv6" - - - script: ci/run_envoy_docker.sh 'ci/do_ci.sh $(CI_TARGET)' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) --curses=no" - 
BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Run CI script" - - - bash: | - echo "disk space at end of build:" - df -h - displayName: "Check disk space at end" - condition: always() - - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/bazel-out/**/testlogs/**/test.xml' - testRunTitle: '$(CI_TARGET)' - searchFolder: $(Build.StagingDirectory)/tmp - condition: always() - - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: $(CI_TARGET) - condition: always() - - # https://stackoverflow.com/questions/57466218/is-an-azure-devops-build-pipeline-is-there-a-way-to-cancel-one-pipeline-job-fro - - powershell: | - $url = "$($env:SYSTEM_TEAMFOUNDATIONCOLLECTIONURI)$env:SYSTEM_TEAMPROJECTID/_apis/build/builds/$($env:BUILD_BUILDID)?api-version=2.0" - $header = @{ Authorization = "Bearer $env:SYSTEM_ACCESSTOKEN" } - $body = @{ 'status'='Cancelling' } | ConvertTo-Json - Invoke-RestMethod -Uri $url -Method Patch -Body $body -Headers $header -ContentType application/json - displayName: Cancel the pipeline - env: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) - condition: and(failed(), eq(variables['Build.Reason'], 'PullRequest')) - -- job: docker - dependsOn: ["bazel"] - condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'Ubuntu 16.04' - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "bazel.release" - itemPattern: "bazel.release/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - - bash: | - set -e - tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz - ci/docker_build.sh - ci/docker_push.sh - ci/docker_tag.sh - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: 
$(Build.SourceBranch) - CIRCLE_SHA1: $(Build.SourceVersion) - DOCKERHUB_USERNAME: $(DockerUsername) - DOCKERHUB_PASSWORD: $(DockerPassword) diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml new file mode 120000 index 000000000000..ea3cc67f3da8 --- /dev/null +++ b/.azure-pipelines/linux.yml @@ -0,0 +1 @@ +pipelines.yml \ No newline at end of file diff --git a/.azure-pipelines/macos.yml b/.azure-pipelines/macos.yml deleted file mode 100644 index 20655177351d..000000000000 --- a/.azure-pipelines/macos.yml +++ /dev/null @@ -1,26 +0,0 @@ -# Azure Pipelines -trigger: -- master - -jobs: -- job: macOS - timeoutInMinutes: 360 - pool: - vmImage: 'macos-latest' - - steps: - - script: ./ci/mac_ci_setup.sh - displayName: 'Install dependencies' - - - script: ./ci/mac_ci_steps.sh - displayName: 'Run Mac CI' - env: - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/bazel-testlogs/**/test.xml' - testRunTitle: 'macOS' - condition: always() diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml new file mode 100644 index 000000000000..69338021a085 --- /dev/null +++ b/.azure-pipelines/pipelines.yml @@ -0,0 +1,145 @@ +trigger: + branches: + include: + - "master" + tags: + include: + - "v*" + +# PR build config is manually overridden in Azure pipelines UI with different secrets +pr: none + +jobs: + - job: format + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
+ pool: + vmImage: "ubuntu-16.04" + steps: + - task: CacheBeta@1 + inputs: + key: "format | ./WORKSPACE | **/*.bzl" + path: $(Build.StagingDirectory)/repository_cache + + - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run check format scripts" + + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" + artifactName: format + condition: failed() + + - job: release + displayName: "Linux-x64 release" + dependsOn: ["format"] + # For master builds, continue even if format fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + timeoutInMinutes: 360 + pool: + vmImage: "ubuntu-16.04" + steps: + - template: bazel.yml + parameters: + ciTarget: bazel.release + + - job: bazel + displayName: "Linux-x64" + dependsOn: ["release"] + # For master builds, continue even if format fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + strategy: + maxParallel: 3 + matrix: + gcc: + CI_TARGET: "bazel.gcc" + clang_tidy: + CI_TARGET: "bazel.clang_tidy" + asan: + CI_TARGET: "bazel.asan" + tsan: + CI_TARGET: "bazel.tsan" + compile_time_options: + CI_TARGET: "bazel.compile_time_options" + timeoutInMinutes: 360 + pool: + vmImage: "Ubuntu 16.04" + steps: + - template: bazel.yml + parameters: + ciTarget: $(CI_TARGET) + + - job: docker + displayName: "Linux-x64 docker" + dependsOn: ["release"] + condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) + pool: + vmImage: "ubuntu-16.04" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: 
"bazel.release" + itemPattern: "bazel.release/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + + - bash: | + set -e + tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz + ci/docker_build.sh + ci/docker_push.sh + ci/docker_tag.sh + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + CIRCLE_SHA1: $(Build.SourceVersion) + DOCKERHUB_USERNAME: $(DockerUsername) + DOCKERHUB_PASSWORD: $(DockerPassword) + + - job: macOS + dependsOn: ["format"] + timeoutInMinutes: 360 + pool: + vmImage: "macos-latest" + steps: + - script: ./ci/mac_ci_setup.sh + displayName: "Install dependencies" + + - script: ./ci/mac_ci_steps.sh + displayName: "Run Mac CI" + env: + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + + - task: PublishTestResults@2 + inputs: + testResultsFiles: "**/bazel-testlogs/**/test.xml" + testRunTitle: "macOS" + condition: always() + + - job: Windows + dependsOn: ["format"] + timeoutInMinutes: 360 + pool: + vmImage: "windows-latest" + steps: + - powershell: | + .\ci\windows_ci_setup.ps1 + Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR" + displayName: "Install dependencies" + env: + TOOLS_BIN_DIR: $(Pipeline.Workspace)\bin + + - bash: ci/windows_ci_steps.sh + displayName: "Run Windows CI" + env: + TMPDIR: $(Agent.TempDirectory) + BAZEL_VC: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC" + BAZEL_SH: "C:\\Program Files\\Git\\bin\\bash.exe" diff --git a/.azure-pipelines/windows.yml b/.azure-pipelines/windows.yml deleted file mode 100644 index 232b739acf8e..000000000000 --- a/.azure-pipelines/windows.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Azure Pipelines -# TODO(lizan): Consider rolling all presubmit jobs into one file. 
-trigger: -- master - -jobs: -- job: Windows - timeoutInMinutes: 360 - pool: - vmImage: "windows-latest" - - steps: - - powershell: | - .\ci\windows_ci_setup.ps1 - Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR" - displayName: "Install dependencies" - env: - TOOLS_BIN_DIR: $(Pipeline.Workspace)\bin - - - bash: ci/windows_ci_steps.sh - displayName: "Run Windows CI" - env: - TMPDIR: $(Agent.TempDirectory) - BAZEL_VC: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC" - BAZEL_SH: "C:\\Program Files\\Git\\bin\\bash.exe" diff --git a/.bazelrc b/.bazelrc index 3452cc1c89f9..648beb564106 100644 --- a/.bazelrc +++ b/.bazelrc @@ -13,7 +13,7 @@ startup --host_jvm_args=-Xmx2g build --workspace_status_command=bazel/get_workspace_status build --experimental_local_memory_estimate build --experimental_strict_action_env=true -build --host_force_python=PY2 +build --host_force_python=PY3 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a build --action_env=BAZEL_LINKOPTS=-lm build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 @@ -159,7 +159,7 @@ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu@sha256:ebf534b8aa505e8ff5663a31eed782942a742ae4d656b54f4236b00399f17911 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker @@ -198,5 +198,12 @@ build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link +# Compile database generation config +# We don't care about built binaries so always strip and 
use fastbuild. +build:compdb -c fastbuild +build:compdb --strip=always +build:compdb --build_tag_filters=-nocompdb +build:compdb --define=ENVOY_CONFIG_COMPILATION_DATABASE=1 + try-import %workspace%/clang.bazelrc try-import %workspace%/user.bazelrc diff --git a/.bazelversion b/.bazelversion index 227cea215648..ccbccc3dc626 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -2.0.0 +2.2.0 diff --git a/.clang-tidy b/.clang-tidy index c3c4b9349d68..93d48258a9ae 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,19 +1,5 @@ -Checks: 'abseil-*, - bugprone-*, - clang-analyzer-*, - clang-diagnostic-*, - misc-unused-using-decls, - modernize-*, - -modernize-pass-by-value, - -modernize-use-trailing-return-type, - performance-*, - readability-braces-around-statements, - readability-container-size-empty, - readability-identifier-naming, - readability-redundant-*' - -#TODO(lizan): grow this list, fix possible warnings and make more checks as error -WarningsAsErrors: 'abseil-duration-*, +Checks: '-clang-analyzer-optin.cplusplus.UninitializedObject, + abseil-duration-*, abseil-faster-strsplit-delimiter, abseil-no-namespace, abseil-redundant-strcat-calls, @@ -51,6 +37,8 @@ WarningsAsErrors: 'abseil-duration-*, readability-redundant-smartptr-get, readability-redundant-string-cstr' +WarningsAsErrors: '*' + CheckOptions: - key: bugprone-assert-side-effect.AssertMacros value: 'ASSERT' @@ -87,3 +75,6 @@ CheckOptions: - key: readability-identifier-naming.UnionCase value: 'CamelCase' + + - key: readability-identifier-naming.FunctionCase + value: 'camelBack' diff --git a/.gitignore b/.gitignore index 2c30faa9bc6a..d9a4f40e9b02 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,8 @@ TAGS .vimrc .vs .vscode +clang-tidy-fixes.yaml +.gdb_history clang.bazelrc user.bazelrc **/Cargo.lock diff --git a/CODEOWNERS b/CODEOWNERS index 79cfffb43df3..86b4d82310cf 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -72,6 +72,7 @@ extensions/filters/common/original_src @snowp @klarose 
/*/extensions/common/wasm @jplevyak @PiotrSikora @lizan # common crypto extension /*/extensions/common/crypto @lizan @PiotrSikora @bdecoste +/*/extensions/common/proxy_protocol @alyssawilk @wez470 /*/extensions/filters/http/grpc_http1_bridge @snowp @jose /*/extensions/filters/http/gzip @gsagula @dio /*/extensions/filters/http/fault @rshriram @alyssawilk @@ -100,6 +101,7 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/network/ext_authz @gsagula @dio /*/extensions/filters/network/tcp_proxy @alyssawilk @zuercher /*/extensions/filters/network/echo @htuch @alyssawilk +/*/extensions/filters/udp/dns_filter @abaptiste @mattklein123 /*/extensions/filters/network/direct_response @kyessenov @zuercher /*/extensions/filters/udp/udp_proxy @mattklein123 @danzh2010 /*/extensions/clusters/aggregate @yxue @snowp diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 0df8070ee7d7..4f9dcbc7bfdb 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -79,13 +79,15 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get * Begin marshalling the ongoing PR flow in this repo. Ask maintainers to hold off merging any particularly risky PRs until after the release is tagged. This is because we aim for master to be at release candidate quality at all times. -* Do a final check of the [release notes](docs/root/intro/version_history.rst) and make any needed - corrections. -* Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to - "1.6.0". Also remove the "Pending" tags and add dates to the top of the [release notes](docs/root/intro/version_history.rst) - and [deprecated log](docs/root/intro/deprecated.rst). Get a review and merge. -* **Wait for tests to pass on - [master](https://circleci.com/gh/envoyproxy/envoy/tree/master).** +* Do a final check of the [release notes](docs/root/intro/version_history.rst): + * Make any needed corrections (grammar, punctuation, formatting, etc.). 
+ * Check to see if any security/stable version release notes are duplicated in + the major version release notes. These should not be duplicated. + * Remove the "Pending" tags and add dates to the top of the [release notes](docs/root/intro/version_history.rst) + and [deprecated log](docs/root/intro/deprecated.rst). + * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to + "1.6.0". + * Get a review and merge. * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should start with "v" and be followed by the version number. E.g., "v1.6.0". **This must match the [VERSION](VERSION).** diff --git a/VERSION b/VERSION index 2f2e08cfa3bf..63e799cf451b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.14.0-dev +1.14.1 diff --git a/api/BUILD b/api/BUILD index 7bb2ec8b5486..84c9ec0c2465 100644 --- a/api/BUILD +++ b/api/BUILD @@ -57,7 +57,6 @@ proto_library( "//envoy/config/filter/http/squash/v2:pkg", "//envoy/config/filter/http/tap/v2alpha:pkg", "//envoy/config/filter/http/transcoder/v2:pkg", - "//envoy/config/filter/http/wasm/v2:pkg", "//envoy/config/filter/listener/http_inspector/v2:pkg", "//envoy/config/filter/listener/original_dst/v2:pkg", "//envoy/config/filter/listener/original_src/v2alpha1:pkg", @@ -79,10 +78,10 @@ proto_library( "//envoy/config/filter/network/sni_cluster/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/filter/network/wasm/v2:pkg", "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", + "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/health_checker/redis/v2:pkg", @@ -106,10 +105,12 @@ proto_library( "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", 
"//envoy/data/core/v2alpha:pkg", + "//envoy/data/dns/v2alpha:pkg", "//envoy/data/tap/v2alpha:pkg", "//envoy/service/accesslog/v2:pkg", "//envoy/service/auth/v2:pkg", "//envoy/service/discovery/v2:pkg", + "//envoy/service/event_reporting/v2alpha:pkg", "//envoy/service/load_stats/v2:pkg", "//envoy/service/metrics/v2:pkg", "//envoy/service/ratelimit/v2:pkg", @@ -149,9 +150,11 @@ proto_library( "//envoy/config/route/v3:pkg", "//envoy/config/tap/v3:pkg", "//envoy/config/trace/v3:pkg", + "//envoy/config/wasm/v3:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/data/cluster/v3:pkg", "//envoy/data/core/v3:pkg", + "//envoy/data/dns/v3:pkg", "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", @@ -162,6 +165,7 @@ proto_library( "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", + "//envoy/extensions/filter/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", @@ -225,12 +229,12 @@ proto_library( "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/extensions/wasm/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", + "//envoy/service/event_reporting/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index b11d3ddfccf0..dc77573c683b 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -9,7 +9,7 @@ changes. They may be as part of a larger implementation PR. 
Please follow the st process for validating build/test sanity of `api/` before submitting a PR. *Note: New .proto files should be added to -[BUILD](https://github.com/envoyproxy/envoy/blob/master/api/docs/BUILD) in order to get the RSTs generated.* +[BUILD](https://github.com/envoyproxy/envoy/blob/master/api/versioning/BUILD) in order to get the RSTs generated.* ## Documentation changes diff --git a/api/STYLE.md b/api/STYLE.md index ba30c4c2a8ac..791f9cf7079d 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -85,19 +85,7 @@ In addition, the following conventions should be followed: ## Package organization -API definitions are layered hierarchically in packages from top-to-bottom in v2 as following: - -- `envoy.service` contains gRPC definitions of supporting services; -- `envoy.config` contains definitions for service configuration, filter -configuration, and bootstrap; -- `envoy.api.v2` contains definitions for EDS, CDS, RDS, LDS, and top-level -resources such as `Cluster`; -- `envoy.api.v2.endpoint`, `envoy.api.v2.cluster`, `envoy.api.v2.route`, -`envoy.api.v2.listener`, `envoy.api.v2.ratelimit` define sub-messages of the top-level resources; -- `envoy.api.v2.core` and `envoy.api.v2.auth` hold core definitions consumed -throughout the API. - -In Envoy API v3, API definitions are layered hierarchically in packages from top-to-bottom as following: +API definitions are layered hierarchically in packages from top-to-bottom as following: - `envoy.extensions` contains all definitions for the extensions, the package should match the structure of the `source` directory. - `envoy.service` contains gRPC definitions of supporting services and top-level messages for the services. e.g. `envoy.service.route.v3` contains RDS, `envoy.service.listener.v3` contains LDS. @@ -105,27 +93,21 @@ e.g. `envoy.service.route.v3` contains RDS, `envoy.service.listener.v3` contains - `envoy.data` contains data format declaration for data types that Envoy produces. 
- `envoy.type` contains common protobuf types such as percent, range and matchers. -Dependencies are enforced from top-to-bottom using visibility constraints in -the build system to prevent circular dependency formation. Package group -`//envoy/api/v2:friends` selects consumers of the core API package (services and configs) -and is the default visibility for the core API packages. The default visibility -for services and configs should be `//docs` (proto documentation tool). - Extensions should use the regular hierarchy. For example, configuration for network filters belongs -in a package under `envoy.config.filter.network`. +in a package under `envoy.extensions.filter.network`. ## Adding an extension configuration to the API -Extensions must currently be added as v2 APIs following the [package +Extensions must currently be added as v3 APIs following the [package organization](#package-organization) above. To add an extension config to the API, the steps below should be followed: 1. If this is still WiP and subject to breaking changes, use `vNalpha` instead of `vN` in steps - below. Refer to the [Cache filter config](envoy/config/filter/http/cache/v2alpha/cache.proto) - as an example of `v2alpha`, and the - [Buffer filter config](envoy/config/filter/http/buffer/v2/buffer.proto) as an example of `v2`. -1. Place the v2 extension configuration `.proto` in `api/envoy/config`, e.g. - `api/envoy/config/filter/http/foobar/v2/foobar.proto` together with an initial BUILD file: + below. Refer to the [Cache filter config](envoy/extensions/filter/http/cache/v3alpha/cache.proto) + as an example of `v3alpha`, and the + [Buffer filter config](envoy/extensions/filter/http/buffer/v3/buffer.proto) as an example of `v3`. +1. Place the v3 extension configuration `.proto` in `api/envoy/extensions`, e.g. 
+ `api/envoy/extensions/filter/http/foobar/v3/foobar.proto` together with an initial BUILD file: ```bazel load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") @@ -135,14 +117,16 @@ To add an extension config to the API, the steps below should be followed: deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ``` -1. Add to the v2 extension config proto `import "udpa/annotations/migrate.proto";` -1. Add to the v2 extension config proto a file level `option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.foobar.v3";`. - This places the filter in the correct [v3 package hierarchy](#package-organization). -1. If this is still WiP and subject to breaking changes, import - `udpa/annotations/status.proto` and set `option (udpa.annotations.file_status).work_in_progress = true;`. -1. Add a reference to the v2 extension config in (1) in [api/docs/BUILD](docs/BUILD). +1. Add to the v3 extension config proto `import "udpa/annotations/migrate.proto";` + and `import "udpa/annotations/status.proto";` +1. If this is still WiP and subject to breaking changes, set + `option (udpa.annotations.file_status).work_in_progress = true;`. +1. Add to the v3 extension config proto a file level + `option (udpa.annotations.file_status).package_version_status = ACTIVE;`. + This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD). +1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`. 1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file, - reformat `foobar.proto` as needed and also generate the v3 extension config, + reformat `foobar.proto` as needed and also generate the v4alpha extension config (if needed), together with shadow API protos. 1. `git add api/ generated_api_shadow/` to add any new files to your Git index. 
diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 5d0217bd2be6..c275a8c65835 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "db4b343e48c1264bb4d9ff491b059300701dc7c7" # Jan 24, 2020 -UDPA_SHA256 = "800624f44592a24898f133e39ae7fbb7a6c4b85bdddd448185fb7e277f097a56" +UDPA_GIT_SHA = "e8cd3a4bb307e2c810cffff99f93e96e6d7fee85" # Mar 27, 2020 +UDPA_SHA256 = "1fd7857cb61daee7726fca8f4d55e4923774a8d00a53007a4093830dc0482685" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/api/docs/BUILD b/api/docs/BUILD deleted file mode 100644 index cc0accb8c162..000000000000 --- a/api/docs/BUILD +++ /dev/null @@ -1,130 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load("@rules_proto//proto:defs.bzl", "proto_library") - -package_group( - name = "docs", - packages = [ - "//docs", - ], -) - -# This is where you add protos that will participate in docs RST generation. 
-proto_library( - name = "protos", - visibility = ["//visibility:public"], - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/api/v2/ratelimit:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/accesslog/v2:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/cluster/aggregate/v2alpha:pkg", - "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", - "//envoy/config/cluster/redis:pkg", - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", - "//envoy/config/common/tap/v2alpha:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/filter/dubbo/router/v2alpha1:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", - "//envoy/config/filter/http/aws_lambda/v2alpha:pkg", - "//envoy/config/filter/http/aws_request_signing/v2alpha:pkg", - "//envoy/config/filter/http/buffer/v2:pkg", - "//envoy/config/filter/http/cache/v2alpha:pkg", - "//envoy/config/filter/http/compressor/v2:pkg", - "//envoy/config/filter/http/cors/v2:pkg", - "//envoy/config/filter/http/csrf/v2:pkg", - "//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg", - "//envoy/config/filter/http/dynamo/v2:pkg", - "//envoy/config/filter/http/ext_authz/v2:pkg", - "//envoy/config/filter/http/fault/v2:pkg", - "//envoy/config/filter/http/grpc_http1_bridge/v2:pkg", - "//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg", - "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", - "//envoy/config/filter/http/grpc_web/v2:pkg", - "//envoy/config/filter/http/gzip/v2:pkg", - "//envoy/config/filter/http/header_to_metadata/v2:pkg", - "//envoy/config/filter/http/health_check/v2:pkg", - "//envoy/config/filter/http/ip_tagging/v2:pkg", - "//envoy/config/filter/http/jwt_authn/v2alpha:pkg", - "//envoy/config/filter/http/lua/v2:pkg", - 
"//envoy/config/filter/http/on_demand/v2:pkg", - "//envoy/config/filter/http/original_src/v2alpha1:pkg", - "//envoy/config/filter/http/rate_limit/v2:pkg", - "//envoy/config/filter/http/rbac/v2:pkg", - "//envoy/config/filter/http/router/v2:pkg", - "//envoy/config/filter/http/squash/v2:pkg", - "//envoy/config/filter/http/tap/v2alpha:pkg", - "//envoy/config/filter/http/transcoder/v2:pkg", - "//envoy/config/filter/http/wasm/v2:pkg", - "//envoy/config/filter/listener/http_inspector/v2:pkg", - "//envoy/config/filter/listener/original_dst/v2:pkg", - "//envoy/config/filter/listener/original_src/v2alpha1:pkg", - "//envoy/config/filter/listener/proxy_protocol/v2:pkg", - "//envoy/config/filter/listener/tls_inspector/v2:pkg", - "//envoy/config/filter/network/client_ssl_auth/v2:pkg", - "//envoy/config/filter/network/direct_response/v2:pkg", - "//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg", - "//envoy/config/filter/network/echo/v2:pkg", - "//envoy/config/filter/network/ext_authz/v2:pkg", - "//envoy/config/filter/network/http_connection_manager/v2:pkg", - "//envoy/config/filter/network/kafka_broker/v2alpha1:pkg", - "//envoy/config/filter/network/local_rate_limit/v2alpha:pkg", - "//envoy/config/filter/network/mongo_proxy/v2:pkg", - "//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg", - "//envoy/config/filter/network/rate_limit/v2:pkg", - "//envoy/config/filter/network/rbac/v2:pkg", - "//envoy/config/filter/network/redis_proxy/v2:pkg", - "//envoy/config/filter/network/sni_cluster/v2:pkg", - "//envoy/config/filter/network/tcp_proxy/v2:pkg", - "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/filter/network/wasm/v2:pkg", - "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", - "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", - "//envoy/config/grpc_credential/v2alpha:pkg", - "//envoy/config/health_checker/redis/v2:pkg", - 
"//envoy/config/listener/v2:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/config/ratelimit/v2:pkg", - "//envoy/config/rbac/v2:pkg", - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "//envoy/config/retry/omit_host_metadata/v2:pkg", - "//envoy/config/retry/previous_hosts/v2:pkg", - "//envoy/config/retry/previous_priorities:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/config/trace/v2alpha:pkg", - "//envoy/config/transport_socket/alts/v2alpha:pkg", - "//envoy/config/transport_socket/raw_buffer/v2:pkg", - "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2:pkg", - "//envoy/data/accesslog/v2:pkg", - "//envoy/data/cluster/v2alpha:pkg", - "//envoy/data/core/v2alpha:pkg", - "//envoy/data/tap/v2alpha:pkg", - "//envoy/service/accesslog/v2:pkg", - "//envoy/service/auth/v2:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/load_stats/v2:pkg", - "//envoy/service/metrics/v2:pkg", - "//envoy/service/ratelimit/v2:pkg", - "//envoy/service/status/v2:pkg", - "//envoy/service/tap/v2alpha:pkg", - "//envoy/service/trace/v2:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/metadata/v2:pkg", - "//envoy/type/tracing/v2:pkg", - ], -) diff --git a/api/envoy/admin/v2alpha/BUILD b/api/envoy/admin/v2alpha/BUILD index a7253df510f8..1d38be06555c 100644 --- a/api/envoy/admin/v2alpha/BUILD +++ b/api/envoy/admin/v2alpha/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/bootstrap/v2:pkg", "//envoy/service/tap/v2alpha:pkg", "//envoy/type:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/admin/v2alpha/certs.proto b/api/envoy/admin/v2alpha/certs.proto index df84f0b5d7eb..c7b568ca1e58 100644 --- a/api/envoy/admin/v2alpha/certs.proto +++ b/api/envoy/admin/v2alpha/certs.proto @@ -4,9 +4,12 @@ package envoy.admin.v2alpha; 
import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Certificates] diff --git a/api/envoy/admin/v2alpha/clusters.proto b/api/envoy/admin/v2alpha/clusters.proto index 4a05a40fe3ca..3b7ec029aa63 100644 --- a/api/envoy/admin/v2alpha/clusters.proto +++ b/api/envoy/admin/v2alpha/clusters.proto @@ -8,9 +8,12 @@ import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/health_check.proto"; import "envoy/type/percent.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ClustersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Clusters] diff --git a/api/envoy/admin/v2alpha/config_dump.proto b/api/envoy/admin/v2alpha/config_dump.proto index e5ddc6cce62a..833c015fb474 100644 --- a/api/envoy/admin/v2alpha/config_dump.proto +++ b/api/envoy/admin/v2alpha/config_dump.proto @@ -7,9 +7,12 @@ import "envoy/config/bootstrap/v2/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ConfigDump] diff --git a/api/envoy/admin/v2alpha/listeners.proto b/api/envoy/admin/v2alpha/listeners.proto index 8fee45093065..ca7b736521d0 100644 --- a/api/envoy/admin/v2alpha/listeners.proto +++ b/api/envoy/admin/v2alpha/listeners.proto @@ -4,9 +4,12 @@ package envoy.admin.v2alpha; import "envoy/api/v2/core/address.proto"; +import 
"udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ListenersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listeners] diff --git a/api/envoy/admin/v2alpha/memory.proto b/api/envoy/admin/v2alpha/memory.proto index 1544cd111dfd..85fd2169d6d7 100644 --- a/api/envoy/admin/v2alpha/memory.proto +++ b/api/envoy/admin/v2alpha/memory.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.admin.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MemoryProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Memory] diff --git a/api/envoy/admin/v2alpha/metrics.proto b/api/envoy/admin/v2alpha/metrics.proto index 79c15f72b2ec..15ad219c13e5 100644 --- a/api/envoy/admin/v2alpha/metrics.proto +++ b/api/envoy/admin/v2alpha/metrics.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.admin.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MetricsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics] diff --git a/api/envoy/admin/v2alpha/mutex_stats.proto b/api/envoy/admin/v2alpha/mutex_stats.proto index 1b725a11143a..22c65f3de5a6 100644 --- a/api/envoy/admin/v2alpha/mutex_stats.proto +++ b/api/envoy/admin/v2alpha/mutex_stats.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.admin.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MutexStatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: 
MutexStats] diff --git a/api/envoy/admin/v2alpha/server_info.proto b/api/envoy/admin/v2alpha/server_info.proto index 1052cb6296ee..b9db6bbc1e1f 100644 --- a/api/envoy/admin/v2alpha/server_info.proto +++ b/api/envoy/admin/v2alpha/server_info.proto @@ -5,10 +5,12 @@ package envoy.admin.v2alpha; import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ServerInfoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Server State] diff --git a/api/envoy/admin/v2alpha/tap.proto b/api/envoy/admin/v2alpha/tap.proto index d16ffdd711db..6335b4db6284 100644 --- a/api/envoy/admin/v2alpha/tap.proto +++ b/api/envoy/admin/v2alpha/tap.proto @@ -4,11 +4,13 @@ package envoy.admin.v2alpha; import "envoy/service/tap/v2alpha/common.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] diff --git a/api/envoy/admin/v3/certs.proto b/api/envoy/admin/v3/certs.proto index 1b28c2eb5e2c..158c8aead28f 100644 --- a/api/envoy/admin/v3/certs.proto +++ b/api/envoy/admin/v3/certs.proto @@ -4,11 +4,13 @@ package envoy.admin.v3; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Certificates] diff --git a/api/envoy/admin/v3/clusters.proto b/api/envoy/admin/v3/clusters.proto index 47f8df0852b1..fc05c8a10de2 
100644 --- a/api/envoy/admin/v3/clusters.proto +++ b/api/envoy/admin/v3/clusters.proto @@ -8,11 +8,13 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/type/v3/percent.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ClustersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Clusters] diff --git a/api/envoy/admin/v3/config_dump.proto b/api/envoy/admin/v3/config_dump.proto index 6e2e5952b3e5..b3c3836a8cc0 100644 --- a/api/envoy/admin/v3/config_dump.proto +++ b/api/envoy/admin/v3/config_dump.proto @@ -7,11 +7,13 @@ import "envoy/config/bootstrap/v3/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ConfigDump] diff --git a/api/envoy/admin/v3/listeners.proto b/api/envoy/admin/v3/listeners.proto index 690d1a4d27f5..6197a44e4243 100644 --- a/api/envoy/admin/v3/listeners.proto +++ b/api/envoy/admin/v3/listeners.proto @@ -4,11 +4,13 @@ package envoy.admin.v3; import "envoy/config/core/v3/address.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ListenersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listeners] diff --git a/api/envoy/admin/v3/memory.proto b/api/envoy/admin/v3/memory.proto index 44ef011e4cf5..bcf9f271748d 100644 --- 
a/api/envoy/admin/v3/memory.proto +++ b/api/envoy/admin/v3/memory.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.admin.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "MemoryProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Memory] diff --git a/api/envoy/admin/v3/metrics.proto b/api/envoy/admin/v3/metrics.proto index 3a2bd8f27f12..71592ac1e9ec 100644 --- a/api/envoy/admin/v3/metrics.proto +++ b/api/envoy/admin/v3/metrics.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.admin.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "MetricsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics] diff --git a/api/envoy/admin/v3/mutex_stats.proto b/api/envoy/admin/v3/mutex_stats.proto index d0a2ca08efff..49965d87ae80 100644 --- a/api/envoy/admin/v3/mutex_stats.proto +++ b/api/envoy/admin/v3/mutex_stats.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.admin.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "MutexStatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: MutexStats] diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index 126f10a048c4..ac0204428053 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -4,13 +4,14 @@ package envoy.admin.v3; import "google/protobuf/duration.proto"; -import "udpa/annotations/versioning.proto"; - import 
"envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ServerInfoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Server State] diff --git a/api/envoy/admin/v3/tap.proto b/api/envoy/admin/v3/tap.proto index 094753a6a490..ca7ab4405a9b 100644 --- a/api/envoy/admin/v3/tap.proto +++ b/api/envoy/admin/v3/tap.proto @@ -4,13 +4,14 @@ package envoy.admin.v3; import "envoy/config/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] diff --git a/api/envoy/admin/v4alpha/BUILD b/api/envoy/admin/v4alpha/BUILD new file mode 100644 index 000000000000..6da5b60bad28 --- /dev/null +++ b/api/envoy/admin/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/admin/v3:pkg", + "//envoy/annotations:pkg", + "//envoy/config/bootstrap/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/admin/v4alpha/certs.proto b/api/envoy/admin/v4alpha/certs.proto new file mode 100644 index 000000000000..585b09bccf4c --- /dev/null +++ b/api/envoy/admin/v4alpha/certs.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "google/protobuf/timestamp.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "CertsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Certificates] + +// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to +// display certificate information. See :ref:`/certs ` for more +// information. +message Certificates { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificates"; + + // List of certificates known to an Envoy. + repeated Certificate certificates = 1; +} + +message Certificate { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificate"; + + // Details of CA certificate. + repeated CertificateDetails ca_cert = 1; + + // Details of Certificate Chain + repeated CertificateDetails cert_chain = 2; +} + +// [#next-free-field: 7] +message CertificateDetails { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; + + // Path of the certificate. + string path = 1; + + // Certificate Serial Number. 
+ string serial_number = 2; + + // List of Subject Alternate names. + repeated SubjectAlternateName subject_alt_names = 3; + + // Minimum of days until expiration of certificate and it's chain. + uint64 days_until_expiration = 4; + + // Indicates the time from which the certificate is valid. + google.protobuf.Timestamp valid_from = 5; + + // Indicates the time at which the certificate expires. + google.protobuf.Timestamp expiration_time = 6; +} + +message SubjectAlternateName { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.SubjectAlternateName"; + + // Subject Alternate Name. + oneof name { + string dns = 1; + + string uri = 2; + + string ip_address = 3; + } +} diff --git a/api/envoy/admin/v4alpha/clusters.proto b/api/envoy/admin/v4alpha/clusters.proto new file mode 100644 index 000000000000..9056262cae86 --- /dev/null +++ b/api/envoy/admin/v4alpha/clusters.proto @@ -0,0 +1,162 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/admin/v4alpha/metrics.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/type/v3/percent.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ClustersProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Clusters] + +// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. +// See :ref:`/clusters ` for more information. +message Clusters { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Clusters"; + + // Mapping from cluster name to each cluster's status. + repeated ClusterStatus cluster_statuses = 1; +} + +// Details an individual cluster's current status. 
+// [#next-free-field: 6] +message ClusterStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; + + // Name of the cluster. + string name = 1; + + // Denotes whether this cluster was added via API or configured statically. + bool added_via_api = 2; + + // The success rate threshold used in the last interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors: externally and locally generated were used to calculate the threshold. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*, only externally generated errors were used to calculate the threshold. + // The threshold is used to eject hosts based on their success rate. See + // :ref:`Cluster outlier detection ` documentation for details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + type.v3.Percent success_rate_ejection_threshold = 3; + + // Mapping from host address to the host's current status. + repeated HostStatus host_statuses = 4; + + // The success rate threshold used in the last interval when only locally originated failures were + // taken into account and externally originated errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*. The threshold is used to eject hosts based on their success rate. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. 
There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + type.v3.Percent local_origin_success_rate_ejection_threshold = 5; +} + +// Current state of a particular host. +// [#next-free-field: 10] +message HostStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostStatus"; + + // Address of this host. + config.core.v4alpha.Address address = 1; + + // List of stats specific to this host. + repeated SimpleMetric stats = 2; + + // The host's current health status. + HostHealthStatus health_status = 3; + + // Request success rate for this host over the last calculated interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors: externally and locally generated were used in success rate + // calculation. If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*, only externally generated errors were used in success rate calculation. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + type.v3.Percent success_rate = 4; + + // The host's weight. If not configured, the value defaults to 1. + uint32 weight = 5; + + // The hostname of the host, if applicable. + string hostname = 6; + + // The host's priority. If not configured, the value defaults to 0 (highest priority). 
+ uint32 priority = 7; + + // Request success rate for this host over the last calculated + // interval when only locally originated errors are taken into account and externally originated + // errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + type.v3.Percent local_origin_success_rate = 8; + + // locality of the host. + config.core.v4alpha.Locality locality = 9; +} + +// Health status for a host. +// [#next-free-field: 7] +message HostHealthStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostHealthStatus"; + + // The host is currently failing active health checks. + bool failed_active_health_check = 1; + + // The host is currently considered an outlier and has been ejected. + bool failed_outlier_check = 2; + + // The host is currently being marked as degraded through active health checking. + bool failed_active_degraded_check = 4; + + // The host has been removed from service discovery, but is being stabilized due to active + // health checking. + bool pending_dynamic_removal = 5; + + // The host has not yet been health checked. + bool pending_active_hc = 6; + + // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported + // here. + // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
+ config.core.v4alpha.HealthStatus eds_health_status = 3; +} diff --git a/api/envoy/admin/v4alpha/config_dump.proto b/api/envoy/admin/v4alpha/config_dump.proto new file mode 100644 index 000000000000..02709a414506 --- /dev/null +++ b/api/envoy/admin/v4alpha/config_dump.proto @@ -0,0 +1,342 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/config/bootstrap/v4alpha/bootstrap.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ConfigDumpProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: ConfigDump] + +// The :ref:`/config_dump ` admin endpoint uses this wrapper +// message to maintain and serve arbitrary configuration information from any component in Envoy. +message ConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ConfigDump"; + + // This list is serialized and dumped in its entirety at the + // :ref:`/config_dump ` endpoint. + // + // The following configurations are currently supported and will be dumped in the order given + // below: + // + // * *bootstrap*: :ref:`BootstrapConfigDump ` + // * *clusters*: :ref:`ClustersConfigDump ` + // * *listeners*: :ref:`ListenersConfigDump ` + // * *routes*: :ref:`RoutesConfigDump ` + // + // You can filter output with the resource and mask query parameters. + // See :ref:`/config_dump?resource={} `, + // :ref:`/config_dump?mask={} `, + // or :ref:`/config_dump?resource={},mask={} + // ` for more information. 
+ repeated google.protobuf.Any configs = 1; +} + +message UpdateFailureState { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UpdateFailureState"; + + // What the component configuration would have been if the update had succeeded. + google.protobuf.Any failed_configuration = 1; + + // Time of the latest failed update attempt. + google.protobuf.Timestamp last_update_attempt = 2; + + // Details about the last failed update attempt. + string details = 3; +} + +// This message describes the bootstrap configuration that Envoy was started with. This includes +// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate +// the static portions of an Envoy configuration by reusing the output as the bootstrap +// configuration for another Envoy. +message BootstrapConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.BootstrapConfigDump"; + + config.bootstrap.v4alpha.Bootstrap bootstrap = 1; + + // The timestamp when the BootstrapConfig was last updated. + google.protobuf.Timestamp last_updated = 2; +} + +// Envoy's listener manager fills this message with all currently known listeners. Listener +// configuration information can be used to recreate an Envoy configuration by populating all +// listeners as static listeners or by returning them in a LDS response. +message ListenersConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump"; + + // Describes a statically loaded listener. + message StaticListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ListenersConfigDump.StaticListener"; + + // The listener config. + google.protobuf.Any listener = 1; + + // The timestamp when the Listener was last successfully updated. 
+ google.protobuf.Timestamp last_updated = 2; + } + + message DynamicListenerState { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ListenersConfigDump.DynamicListenerState"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the listener was loaded. In the future, discrete per-listener versions may be supported + // by the API. + string version_info = 1; + + // The listener config. + google.protobuf.Any listener = 2; + + // The timestamp when the Listener was last successfully updated. + google.protobuf.Timestamp last_updated = 3; + } + + // Describes a dynamically loaded listener via the LDS API. + // [#next-free-field: 6] + message DynamicListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ListenersConfigDump.DynamicListener"; + + // The name or unique id of this listener, pulled from the DynamicListenerState config. + string name = 1; + + // The listener state for any active listener by this name. + // These are listeners that are available to service data plane traffic. + DynamicListenerState active_state = 2; + + // The listener state for any warming listener by this name. + // These are listeners that are currently undergoing warming in preparation to service data + // plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the warming listeners should generally be discarded. + DynamicListenerState warming_state = 3; + + // The listener state for any draining listener by this name. + // These are listeners that are currently undergoing draining in preparation to stop servicing + // data plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the draining listeners should generally be discarded. 
+ DynamicListenerState draining_state = 4; + + // Set if the last update failed, cleared after the next successful update. + UpdateFailureState error_state = 5; + } + + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + string version_info = 1; + + // The statically loaded listener configs. + repeated StaticListener static_listeners = 2; + + // State for any warming, active, or draining listeners. + repeated DynamicListener dynamic_listeners = 3; +} + +// Envoy's cluster manager fills this message with all currently known clusters. Cluster +// configuration information can be used to recreate an Envoy configuration by populating all +// clusters as static clusters or by returning them in a CDS response. +message ClustersConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump"; + + // Describes a statically loaded cluster. + message StaticCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ClustersConfigDump.StaticCluster"; + + // The cluster config. + google.protobuf.Any cluster = 1; + + // The timestamp when the Cluster was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + // Describes a dynamically loaded cluster via the CDS API. + message DynamicCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ClustersConfigDump.DynamicCluster"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + // the API. + string version_info = 1; + + // The cluster config. + google.protobuf.Any cluster = 2; + + // The timestamp when the Cluster was last updated. 
+ google.protobuf.Timestamp last_updated = 3; + } + + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + string version_info = 1; + + // The statically loaded cluster configs. + repeated StaticCluster static_clusters = 2; + + // The dynamically loaded active clusters. These are clusters that are available to service + // data plane traffic. + repeated DynamicCluster dynamic_active_clusters = 3; + + // The dynamically loaded warming clusters. These are clusters that are currently undergoing + // warming in preparation to service data plane traffic. Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming clusters should generally be + // discarded. + repeated DynamicCluster dynamic_warming_clusters = 4; +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration +// or defined inline while configuring listeners are separated from those configured dynamically via RDS. +// Route configuration information can be used to recreate an Envoy configuration by populating all routes +// as static routes or by returning them in RDS responses. +message RoutesConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump"; + + message StaticRouteConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.RoutesConfigDump.StaticRouteConfig"; + + // The route config. + google.protobuf.Any route_config = 1; + + // The timestamp when the Route was last updated. 
+ google.protobuf.Timestamp last_updated = 2; + } + + message DynamicRouteConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the route configuration was loaded. + string version_info = 1; + + // The route config. + google.protobuf.Any route_config = 2; + + // The timestamp when the Route was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded route configs. + repeated StaticRouteConfig static_route_configs = 2; + + // The dynamically loaded route configs. + repeated DynamicRouteConfig dynamic_route_configs = 3; +} + +// Envoy's scoped RDS implementation fills this message with all currently loaded route +// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both +// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the +// dynamically obtained scopes via the SRDS API. +message ScopedRoutesConfigDump { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ScopedRoutesConfigDump"; + + message InlineScopedRouteConfigs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; + + // The name assigned to the scoped route configurations. + string name = 1; + + // The scoped route configurations. + repeated google.protobuf.Any scoped_route_configs = 2; + + // The timestamp when the scoped route config set was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + message DynamicScopedRouteConfigs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; + + // The name assigned to the scoped route configurations. 
+ string name = 1; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the scoped routes configuration was loaded. + string version_info = 2; + + // The scoped route configurations. + repeated google.protobuf.Any scoped_route_configs = 3; + + // The timestamp when the scoped route config set was last updated. + google.protobuf.Timestamp last_updated = 4; + } + + // The statically loaded scoped route configs. + repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; + + // The dynamically loaded scoped route configs. + repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; +} + +// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. +message SecretsConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump"; + + // DynamicSecret contains secret information fetched via SDS. + message DynamicSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.SecretsConfigDump.DynamicSecret"; + + // The name assigned to the secret. + string name = 1; + + // This is the per-resource version information. + string version_info = 2; + + // The timestamp when the secret was last updated. + google.protobuf.Timestamp last_updated = 3; + + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + google.protobuf.Any secret = 4; + } + + // StaticSecret specifies statically loaded secret in bootstrap. + message StaticSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.SecretsConfigDump.StaticSecret"; + + // The name assigned to the secret. + string name = 1; + + // The timestamp when the secret was last updated. + google.protobuf.Timestamp last_updated = 2; + + // The actual secret information. 
+ // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + google.protobuf.Any secret = 3; + } + + // The statically loaded secrets. + repeated StaticSecret static_secrets = 1; + + // The dynamically loaded active secrets. These are secrets that are available to service + // clusters or listeners. + repeated DynamicSecret dynamic_active_secrets = 2; + + // The dynamically loaded warming secrets. These are secrets that are currently undergoing + // warming in preparation to service clusters or listeners. + repeated DynamicSecret dynamic_warming_secrets = 3; +} diff --git a/api/envoy/admin/v4alpha/listeners.proto b/api/envoy/admin/v4alpha/listeners.proto new file mode 100644 index 000000000000..89bdc4c5bbf8 --- /dev/null +++ b/api/envoy/admin/v4alpha/listeners.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ListenersProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listeners] + +// Admin endpoint uses this wrapper for `/listeners` to display listener status information. +// See :ref:`/listeners ` for more information. +message Listeners { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Listeners"; + + // List of listener statuses. + repeated ListenerStatus listener_statuses = 1; +} + +// Details an individual listener's current status. +message ListenerStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenerStatus"; + + // Name of the listener + string name = 1; + + // The actual local address that the listener is listening on. 
If a listener was configured + // to listen on port 0, then this address has the port that was allocated by the OS. + config.core.v4alpha.Address local_address = 2; +} diff --git a/api/envoy/admin/v4alpha/memory.proto b/api/envoy/admin/v4alpha/memory.proto new file mode 100644 index 000000000000..d2f0b57229ce --- /dev/null +++ b/api/envoy/admin/v4alpha/memory.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "MemoryProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Memory] + +// Proto representation of the internal memory consumption of an Envoy instance. These represent +// values extracted from an internal TCMalloc instance. For more information, see the section of the +// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). +// [#next-free-field: 7] +message Memory { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Memory"; + + // The number of bytes allocated by the heap for Envoy. This is an alias for + // `generic.current_allocated_bytes`. + uint64 allocated = 1; + + // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for + // `generic.heap_size`. + uint64 heap_size = 2; + + // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards + // virtual memory usage, and depending on the OS, typically do not count towards physical memory + // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. + uint64 pageheap_unmapped = 3; + + // The number of bytes in free, mapped pages in the page heap. 
These bytes always count towards + // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also + // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. + uint64 pageheap_free = 4; + + // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias + // for `tcmalloc.current_total_thread_cache_bytes`. + uint64 total_thread_cache = 5; + + // The number of bytes of the physical memory usage by the allocator. This is an alias for + // `generic.total_physical_bytes`. + uint64 total_physical_bytes = 6; +} diff --git a/api/envoy/admin/v4alpha/metrics.proto b/api/envoy/admin/v4alpha/metrics.proto new file mode 100644 index 000000000000..78613320038b --- /dev/null +++ b/api/envoy/admin/v4alpha/metrics.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "MetricsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metrics] + +// Proto representation of an Envoy Counter or Gauge value. +message SimpleMetric { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SimpleMetric"; + + enum Type { + COUNTER = 0; + GAUGE = 1; + } + + // Type of the metric represented. + Type type = 1; + + // Current metric value. + uint64 value = 2; + + // Name of the metric. 
+ string name = 3; +} diff --git a/api/envoy/admin/v4alpha/mutex_stats.proto b/api/envoy/admin/v4alpha/mutex_stats.proto new file mode 100644 index 000000000000..6f9fcd548cc0 --- /dev/null +++ b/api/envoy/admin/v4alpha/mutex_stats.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "MutexStatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: MutexStats] + +// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run +// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` +// [docs](https://abseil.io/about/design/mutex#extra-features). +// +// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not +// correspond to core clock frequency. For more information, see the `CycleClock` +// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). +message MutexStats { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.MutexStats"; + + // The number of individual mutex contentions which have occurred since startup. + uint64 num_contentions = 1; + + // The length of the current contention wait cycle. + uint64 current_wait_cycles = 2; + + // The lifetime total of all contention wait cycles. 
+ uint64 lifetime_wait_cycles = 3; +} diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto new file mode 100644 index 000000000000..867a9255bc51 --- /dev/null +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -0,0 +1,155 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "google/protobuf/duration.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ServerInfoProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Server State] + +// Proto representation of the value returned by /server_info, containing +// server version/server status information. +// [#next-free-field: 7] +message ServerInfo { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo"; + + enum State { + // Server is live and serving traffic. + LIVE = 0; + + // Server is draining listeners in response to external health checks failing. + DRAINING = 1; + + // Server has not yet completed cluster manager initialization. + PRE_INITIALIZING = 2; + + // Server is running the cluster manager initialization callbacks (e.g., RDS). + INITIALIZING = 3; + } + + // Server version. + string version = 1; + + // State of the server. + State state = 2; + + // Uptime since current epoch was started. + google.protobuf.Duration uptime_current_epoch = 3; + + // Uptime since the start of the first epoch. + google.protobuf.Duration uptime_all_epochs = 4; + + // Hot restart version. + string hot_restart_version = 5; + + // Command line options the server is currently running with. 
+ CommandLineOptions command_line_options = 6; +} + +// [#next-free-field: 29] +message CommandLineOptions { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; + + enum IpVersion { + v4 = 0; + v6 = 1; + } + + enum Mode { + // Validate configs and then serve traffic normally. + Serve = 0; + + // Validate configs and exit. + Validate = 1; + + // Completely load and initialize the config, and then exit without running the listener loop. + InitOnly = 2; + } + + reserved 12, 20, 21; + + reserved "max_stats", "max_obj_name_len"; + + // See :option:`--base-id` for details. + uint64 base_id = 1; + + // See :option:`--concurrency` for details. + uint32 concurrency = 2; + + // See :option:`--config-path` for details. + string config_path = 3; + + // See :option:`--config-yaml` for details. + string config_yaml = 4; + + // See :option:`--allow-unknown-static-fields` for details. + bool allow_unknown_static_fields = 5; + + // See :option:`--reject-unknown-dynamic-fields` for details. + bool reject_unknown_dynamic_fields = 26; + + // See :option:`--admin-address-path` for details. + string admin_address_path = 6; + + // See :option:`--local-address-ip-version` for details. + IpVersion local_address_ip_version = 7; + + // See :option:`--log-level` for details. + string log_level = 8; + + // See :option:`--component-log-level` for details. + string component_log_level = 9; + + // See :option:`--log-format` for details. + string log_format = 10; + + // See :option:`--log-format-escaped` for details. + bool log_format_escaped = 27; + + // See :option:`--log-path` for details. + string log_path = 11; + + // See :option:`--service-cluster` for details. + string service_cluster = 13; + + // See :option:`--service-node` for details. + string service_node = 14; + + // See :option:`--service-zone` for details. + string service_zone = 15; + + // See :option:`--file-flush-interval-msec` for details. 
+ google.protobuf.Duration file_flush_interval = 16; + + // See :option:`--drain-time-s` for details. + google.protobuf.Duration drain_time = 17; + + // See :option:`--parent-shutdown-time-s` for details. + google.protobuf.Duration parent_shutdown_time = 18; + + // See :option:`--mode` for details. + Mode mode = 19; + + // See :option:`--disable-hot-restart` for details. + bool disable_hot_restart = 22; + + // See :option:`--enable-mutex-tracing` for details. + bool enable_mutex_tracing = 23; + + // See :option:`--restart-epoch` for details. + uint32 restart_epoch = 24; + + // See :option:`--cpuset-threads` for details. + bool cpuset_threads = 25; + + // See :option:`--disable-extensions` for details. + repeated string disabled_extensions = 28; +} diff --git a/api/envoy/admin/v4alpha/tap.proto b/api/envoy/admin/v4alpha/tap.proto new file mode 100644 index 000000000000..c47b308d6ee6 --- /dev/null +++ b/api/envoy/admin/v4alpha/tap.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/config/tap/v3/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap] + +// The /tap admin request body that is used to configure an active tap session. +message TapRequest { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.TapRequest"; + + // The opaque configuration ID used to match the configuration to a loaded extension. + // A tap extension configures a similar opaque ID that is used to match. + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The tap configuration to load. 
+ config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD index cc6f9e77c192..46f8d16dfbd7 100644 --- a/api/envoy/api/v2/BUILD +++ b/api/envoy/api/v2/BUILD @@ -14,6 +14,7 @@ api_proto_package( "//envoy/api/v2/endpoint:pkg", "//envoy/api/v2/listener:pkg", "//envoy/api/v2/route:pkg", + "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/listener/v2:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index cdb6a3d168b0..a1642318e043 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ -11,9 +11,9 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/sensitive.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; @@ -21,6 +21,7 @@ option java_outer_classname = "CertProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common TLS configuration] @@ -414,7 +415,7 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DownstreamTlsContext { // Common TLS context settings. CommonTlsContext common_tls_context = 1; @@ -433,6 +434,16 @@ message DownstreamTlsContext { // Config for fetching TLS session ticket keys via SDS API. 
SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; } // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index dcd5c3fd0fb0..0b657a0fa452 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -8,6 +8,7 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import public "envoy/api/v2/cluster.proto"; @@ -16,6 +17,7 @@ option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: CDS] diff --git a/api/envoy/api/v2/cluster.proto b/api/envoy/api/v2/cluster.proto index 55324ff60060..5de5c20df570 100644 --- a/api/envoy/api/v2/cluster.proto +++ b/api/envoy/api/v2/cluster.proto @@ -21,12 +21,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import 
"udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Cluster configuration] @@ -354,7 +356,7 @@ message Cluster { } // Common configuration for all load balancer implementations. - // [#next-free-field: 7] + // [#next-free-field: 8] message CommonLbConfig { // Configuration for :ref:`zone aware routing // `. @@ -384,6 +386,13 @@ message Cluster { message LocalityWeightedLbConfig { } + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + bool use_hostname_for_hashing = 1; + } + // Configures the :ref:`healthy panic threshold `. // If not specified, the default is 50%. // To disable panic mode, set to 0%. @@ -438,6 +447,9 @@ message Cluster { // If set to `true`, the cluster manager will drain all existing // connections to upstream hosts whenever hosts are added or removed from the cluster. bool close_connections_on_host_set_change = 6; + + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) 
+ ConsistentHashingLbConfig consistent_hashing_lb_config = 7; } message RefreshRate { diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index 893d1f1aa168..510619b26429 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -8,6 +8,7 @@ import "envoy/type/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; @@ -16,6 +17,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Circuit breakers] diff --git a/api/envoy/api/v2/cluster/filter.proto b/api/envoy/api/v2/cluster/filter.proto index 67f3c3ba5e09..b87ad79d8f35 100644 --- a/api/envoy/api/v2/cluster/filter.proto +++ b/api/envoy/api/v2/cluster/filter.proto @@ -5,6 +5,7 @@ package envoy.api.v2.cluster; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; @@ -13,6 +14,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Upstream filters] // Upstream filters apply to the connections to the upstream cluster hosts. 
diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto index 0cc638ceb493..6cf35e41ff15 100644 --- a/api/envoy/api/v2/cluster/outlier_detection.proto +++ b/api/envoy/api/v2/cluster/outlier_detection.proto @@ -6,6 +6,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; @@ -14,6 +15,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Outlier detection] diff --git a/api/envoy/api/v2/core/address.proto b/api/envoy/api/v2/core/address.proto index e5c1f1c9d48e..804da539583b 100644 --- a/api/envoy/api/v2/core/address.proto +++ b/api/envoy/api/v2/core/address.proto @@ -7,12 +7,14 @@ import "envoy/api/v2/core/socket_option.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "AddressProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Network addresses] diff --git a/api/envoy/api/v2/core/backoff.proto b/api/envoy/api/v2/core/backoff.proto index b46791500297..e45c71e39be8 100644 --- a/api/envoy/api/v2/core/backoff.proto +++ b/api/envoy/api/v2/core/backoff.proto @@ -5,12 +5,14 @@ package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import 
"udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "BackoffProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Backoff Strategy] diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index d10163b3bdf6..b7145d77efd3 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -14,6 +14,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/core/socket_option.proto"; @@ -22,6 +23,7 @@ option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "BaseProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common types] @@ -230,6 +232,15 @@ message RuntimeUInt32 { string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } +// Runtime derived double with a default when not specified. +message RuntimeDouble { + // Default value if runtime value is not available. + double default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + // Runtime derived bool with a default when not specified. message RuntimeFeatureFlag { // Default value if runtime value is not available. 
diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index 60949ca1c8e5..fa42a7aeec1c 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -9,12 +9,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "ConfigSourceProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Configuration sources] @@ -133,6 +135,8 @@ message ConfigSource { option (validate.required) = true; // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. // // .. note:: // diff --git a/api/envoy/api/v2/core/event_service_config.proto b/api/envoy/api/v2/core/event_service_config.proto new file mode 100644 index 000000000000..f822f8c6b630 --- /dev/null +++ b/api/envoy/api/v2/core/event_service_config.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. 
+message EventServiceConfig { + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. + GrpcService grpc_service = 1; + } +} diff --git a/api/envoy/api/v2/core/grpc_method_list.proto b/api/envoy/api/v2/core/grpc_method_list.proto new file mode 100644 index 000000000000..3d646484b359 --- /dev/null +++ b/api/envoy/api/v2/core/grpc_method_list.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. +message GrpcMethodList { + message Service { + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The names of the gRPC methods in this service. 
+ repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/api/envoy/api/v2/core/grpc_service.proto b/api/envoy/api/v2/core/grpc_service.proto index 6fda81e3a209..dd789644e1d7 100644 --- a/api/envoy/api/v2/core/grpc_service.proto +++ b/api/envoy/api/v2/core/grpc_service.proto @@ -9,15 +9,16 @@ import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; -import "udpa/annotations/sensitive.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "GrpcServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC services] diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto index 91aeb76b8b42..bc4ae3e5c866 100644 --- a/api/envoy/api/v2/core/health_check.proto +++ b/api/envoy/api/v2/core/health_check.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/event_service_config.proto"; import "envoy/type/http.proto"; import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; @@ -14,12 +15,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option 
(udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check] // * Health checking :ref:`architecture overview `. @@ -52,7 +55,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 22] +// [#next-free-field: 23] message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { @@ -71,7 +74,8 @@ message HealthCheck { message HttpHealthCheck { // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. string host = 1; // Specifies the HTTP path that will be requested during health checking. For example @@ -158,7 +162,8 @@ message HealthCheck { // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. string authority = 2; } @@ -288,6 +293,11 @@ message HealthCheck { // If empty, no event log will be written. string event_log_path = 17; + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; + // If set to true, health check failure events will always be logged. If set to false, only the // initial health check failure event will be logged. // The default value is false. 
diff --git a/api/envoy/api/v2/core/http_uri.proto b/api/envoy/api/v2/core/http_uri.proto index 5f740695dd80..cd1a0660e330 100644 --- a/api/envoy/api/v2/core/http_uri.proto +++ b/api/envoy/api/v2/core/http_uri.proto @@ -5,12 +5,14 @@ package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Service URI ] diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index c8cfcf8260ac..5838ca744075 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -6,12 +6,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Protocol options] @@ -32,9 +34,30 @@ message UpstreamHttpProtocolOptions { bool auto_san_validation = 2; } +// [#next-free-field: 6] message HttpProtocolOptions { + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. If not set, there is no idle timeout. When the + // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout @@ -65,6 +88,11 @@ message HttpProtocolOptions { // The current implementation implements this timeout on downstream connections only. // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. 
+ HeadersWithUnderscoresAction headers_with_underscores_action = 5; } // [#next-free-field: 6] diff --git a/api/envoy/api/v2/core/socket_option.proto b/api/envoy/api/v2/core/socket_option.proto index 9a044d1a9eb9..39678ad1b8bc 100644 --- a/api/envoy/api/v2/core/socket_option.proto +++ b/api/envoy/api/v2/core/socket_option.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.api.v2.core; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "SocketOptionProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Socket Option ] diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto index 0794f82aa9d2..da2690f867fc 100644 --- a/api/envoy/api/v2/discovery.proto +++ b/api/envoy/api/v2/discovery.proto @@ -8,11 +8,13 @@ import "google/protobuf/any.proto"; import "google/rpc/status.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.discovery.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common discovery API components] diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index 0917940aee84..b0d5c7c47370 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -10,6 +10,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/endpoint.proto"; 
@@ -19,6 +20,7 @@ option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` diff --git a/api/envoy/api/v2/endpoint.proto b/api/envoy/api/v2/endpoint.proto index 87d8713e8e1f..e233b0e7d34e 100644 --- a/api/envoy/api/v2/endpoint.proto +++ b/api/envoy/api/v2/endpoint.proto @@ -10,12 +10,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Endpoint configuration] // Endpoint discovery :ref:`architecture overview ` diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto index 247c9ae265a5..9724fd72818d 100644 --- a/api/envoy/api/v2/endpoint/endpoint.proto +++ b/api/envoy/api/v2/endpoint/endpoint.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.api.v2.endpoint; +import "udpa/annotations/status.proto"; + import public "envoy/api/v2/endpoint/endpoint_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; diff --git a/api/envoy/api/v2/endpoint/endpoint_components.proto b/api/envoy/api/v2/endpoint/endpoint_components.proto index 5d2fe527588b..d7f209311697 100644 --- a/api/envoy/api/v2/endpoint/endpoint_components.proto +++ b/api/envoy/api/v2/endpoint/endpoint_components.proto @@ -9,12 +9,14 @@ import "envoy/api/v2/core/health_check.proto"; import 
"google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "EndpointComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Endpoints] @@ -29,6 +31,13 @@ message Endpoint { // check port. Setting this with a non-zero value allows an upstream host // to have different health check address port. uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + string hostname = 2; } // The upstream host address. @@ -50,6 +59,12 @@ message Endpoint { // This takes into effect only for upstream clusters with // :ref:`active health checking ` enabled. HealthCheckConfig health_check_config = 2; + + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. + string hostname = 3; } // An Endpoint that Envoy can route traffic to. 
diff --git a/api/envoy/api/v2/endpoint/load_report.proto b/api/envoy/api/v2/endpoint/load_report.proto index a80d5b77d929..928aed6102df 100644 --- a/api/envoy/api/v2/endpoint/load_report.proto +++ b/api/envoy/api/v2/endpoint/load_report.proto @@ -9,12 +9,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // These are stats Envoy reports to GLB every so often. Report frequency is // defined by diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto index aa13453ebc68..d1e528f2472d 100644 --- a/api/envoy/api/v2/lds.proto +++ b/api/envoy/api/v2/lds.proto @@ -10,6 +10,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/listener.proto"; @@ -19,6 +20,7 @@ option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener] // Listener :ref:`configuration overview ` diff --git a/api/envoy/api/v2/listener.proto b/api/envoy/api/v2/listener.proto index 5873380db801..a93df10c3128 100644 --- a/api/envoy/api/v2/listener.proto +++ b/api/envoy/api/v2/listener.proto @@ -7,6 +7,7 @@ import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/socket_option.proto"; import 
"envoy/api/v2/listener/listener_components.proto"; import "envoy/api/v2/listener/udp_listener_config.proto"; +import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/config/listener/v2/api_listener.proto"; import "google/api/annotations.proto"; @@ -14,17 +15,19 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` -// [#next-free-field: 22] +// [#next-free-field: 23] message Listener { enum DrainType { // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check @@ -237,4 +240,8 @@ message Listener { // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. 
+ repeated config.filter.accesslog.v2.AccessLog access_log = 22; } diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index 273b29cb5dd3..671da24b0445 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.api.v2.listener; +import "udpa/annotations/status.proto"; + import public "envoy/api/v2/listener/listener_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; diff --git a/api/envoy/api/v2/listener/listener_components.proto b/api/envoy/api/v2/listener/listener_components.proto index ec889d7f4f46..fe449c63358a 100644 --- a/api/envoy/api/v2/listener/listener_components.proto +++ b/api/envoy/api/v2/listener/listener_components.proto @@ -12,6 +12,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; @@ -20,6 +21,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener components] // Listener :ref:`configuration overview ` @@ -207,9 +209,32 @@ message FilterChain { string name = 7; } -// [#not-implemented-hide:] // Listener filter chain match configuration. This is a recursive structure which allows complex // nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. 
code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// // [#next-free-field: 6] message ListenerFilterChainMatchPredicate { // A set of match configurations used for logical operations. @@ -255,17 +280,8 @@ message ListenerFilter { google.protobuf.Any typed_config = 3; } - // [#not-implemented-hide:] - // Decide when to disable this listener filter on incoming traffic. - // Example: - // 0. always enable filter - // don't set `filter_disabled` - // 1. disable when the destination port is 3306 - // rule.destination_port_range = Int32Range {start = 3306, end = 3307} - // 2. disable when the destination port is 3306 or 15000 - // rule.or_match = MatchSet.rules [ - // rule.destination_port_range = Int32Range {start = 3306, end = 3307}, - // rule.destination_port_range = Int32Range {start = 15000, end = 15001}, - // ] + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. 
ListenerFilterChainMatchPredicate filter_disabled = 4; } diff --git a/api/envoy/api/v2/listener/quic_config.proto b/api/envoy/api/v2/listener/quic_config.proto index 69069f76b7e0..2a4616bb09c9 100644 --- a/api/envoy/api/v2/listener/quic_config.proto +++ b/api/envoy/api/v2/listener/quic_config.proto @@ -6,6 +6,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option java_outer_classname = "QuicConfigProto"; @@ -13,6 +14,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: QUIC listener Config] diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto index 31404b41d530..d4d29531f3aa 100644 --- a/api/envoy/api/v2/listener/udp_listener_config.proto +++ b/api/envoy/api/v2/listener/udp_listener_config.proto @@ -6,6 +6,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option java_outer_classname = "UdpListenerConfigProto"; @@ -13,6 +14,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP Listener Config] // Listener :ref:`configuration overview ` diff --git a/api/envoy/api/v2/ratelimit/ratelimit.proto 
b/api/envoy/api/v2/ratelimit/ratelimit.proto index 25fb2f2b0cbf..5ac72c69a6fb 100644 --- a/api/envoy/api/v2/ratelimit/ratelimit.proto +++ b/api/envoy/api/v2/ratelimit/ratelimit.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.api.v2.ratelimit; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.ratelimit"; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common rate limit components] diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index f54308aafb55..fad73f175840 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -9,6 +9,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/route.proto"; @@ -18,6 +19,7 @@ option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RDS] diff --git a/api/envoy/api/v2/route.proto b/api/envoy/api/v2/route.proto index 87374611d808..549f134a7f43 100644 --- a/api/envoy/api/v2/route.proto +++ b/api/envoy/api/v2/route.proto @@ -9,12 +9,14 @@ import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "RouteProto"; option 
java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP route configuration] // * Routing :ref:`architecture overview ` diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index ec13e9e5c801..92e44f1e19df 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.api.v2.route; +import "udpa/annotations/status.proto"; + import public "envoy/api/v2/route/route_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.route"; diff --git a/api/envoy/api/v2/route/route_components.proto b/api/envoy/api/v2/route/route_components.proto index 2ae4ee75ef30..c890134414e5 100644 --- a/api/envoy/api/v2/route/route_components.proto +++ b/api/envoy/api/v2/route/route_components.proto @@ -16,12 +16,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.route"; option java_outer_classname = "RouteComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP route components] // * Routing :ref:`architecture overview ` @@ -1274,7 +1276,7 @@ message Tracing { // statistics are perfect in the sense that they are emitted on the downstream // side such that they include network level failures. // -// Documentation for :ref:`virtual cluster statistics `. +// Documentation for :ref:`virtual cluster statistics `. // // .. 
note:: // diff --git a/api/envoy/api/v2/scoped_route.proto b/api/envoy/api/v2/scoped_route.proto index 43f81cf92027..0841bd08723c 100644 --- a/api/envoy/api/v2/scoped_route.proto +++ b/api/envoy/api/v2/scoped_route.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.api.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ScopedRouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` diff --git a/api/envoy/api/v2/srds.proto b/api/envoy/api/v2/srds.proto index f874307d7e1e..0edb99a1eccb 100644 --- a/api/envoy/api/v2/srds.proto +++ b/api/envoy/api/v2/srds.proto @@ -8,6 +8,7 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import public "envoy/api/v2/scoped_route.proto"; @@ -16,6 +17,7 @@ option java_outer_classname = "SrdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: SRDS] // * Routing :ref:`architecture overview ` diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto index 2486ffb81ed7..5b4106af106e 100644 --- a/api/envoy/config/accesslog/v2/als.proto +++ b/api/envoy/config/accesslog/v2/als.proto @@ -8,12 +8,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; 
option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.grpc.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/api/envoy/config/accesslog/v2/file.proto b/api/envoy/config/accesslog/v2/file.proto index 395c396d7033..9b8671c81358 100644 --- a/api/envoy/config/accesslog/v2/file.proto +++ b/api/envoy/config/accesslog/v2/file.proto @@ -5,12 +5,14 @@ package envoy.config.accesslog.v2; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "FileProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.file.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: File access log] // [#extension: envoy.access_loggers.file] diff --git a/api/envoy/config/accesslog/v2/wasm.proto b/api/envoy/config/accesslog/v2/wasm.proto index a7b4e2143999..3ece08a90fb0 100644 --- a/api/envoy/config/accesslog/v2/wasm.proto +++ b/api/envoy/config/accesslog/v2/wasm.proto @@ -7,12 +7,14 @@ import "envoy/config/wasm/v2/wasm.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.wasm.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Wasm access log] 
diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index 8bfc999f08c5..f5732ba3f8e4 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -9,13 +9,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v3"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common access log types] diff --git a/api/envoy/config/bootstrap/v2/BUILD b/api/envoy/config/bootstrap/v2/BUILD index ca88c778827b..f15f3d64622f 100644 --- a/api/envoy/config/bootstrap/v2/BUILD +++ b/api/envoy/config/bootstrap/v2/BUILD @@ -14,5 +14,6 @@ api_proto_package( "//envoy/config/overload/v2alpha:pkg", "//envoy/config/trace/v2:pkg", "//envoy/config/wasm/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index 7b6244d693b3..711e846a7dfc 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -7,6 +7,7 @@ import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/core/event_service_config.proto"; import "envoy/api/v2/core/socket_option.proto"; import "envoy/api/v2/listener.proto"; import "envoy/config/metrics/v2/stats.proto"; @@ -19,11 +20,13 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; import 
"validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root @@ -196,6 +199,11 @@ message ClusterManager { message OutlierDetection { // Specifies the path to the outlier event log. string event_log_path = 1; + + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + api.v2.core.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running diff --git a/api/envoy/config/bootstrap/v3/BUILD b/api/envoy/config/bootstrap/v3/BUILD index cf8f11c4fdf6..3d63b7782529 100644 --- a/api/envoy/config/bootstrap/v3/BUILD +++ b/api/envoy/config/bootstrap/v3/BUILD @@ -14,8 +14,8 @@ api_proto_package( "//envoy/config/metrics/v3:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", + "//envoy/config/wasm/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/extensions/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 989e4dbd19f2..735a65466abd 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -6,26 +6,28 @@ import "envoy/config/cluster/v3/cluster.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/event_service_config.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import 
"envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/trace.proto"; +import "envoy/config/wasm/v3/wasm.proto"; import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; -import "envoy/extensions/wasm/v3/wasm.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v3"; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root @@ -175,7 +177,7 @@ message Bootstrap { bool use_tcp_for_dns_lookups = 20; // Configuration for an wasm service provider(s). - repeated envoy.extensions.wasm.v3.WasmService wasm_service = 21; + repeated wasm.v3.WasmService wasm_service = 21; } // Administration interface :ref:`operations documentation @@ -212,6 +214,11 @@ message ClusterManager { // Specifies the path to the outlier event log. string event_log_path = 1; + + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + core.v3.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running diff --git a/api/envoy/config/bootstrap/v4alpha/BUILD b/api/envoy/config/bootstrap/v4alpha/BUILD new file mode 100644 index 000000000000..3234587e27e6 --- /dev/null +++ b/api/envoy/config/bootstrap/v4alpha/BUILD @@ -0,0 +1,21 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/bootstrap/v3:pkg", + "//envoy/config/cluster/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/config/overload/v3:pkg", + "//envoy/config/trace/v4alpha:pkg", + "//envoy/config/wasm/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto new file mode 100644 index 000000000000..149c3cf0ed2e --- /dev/null +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -0,0 +1,383 @@ +syntax = "proto3"; + +package envoy.config.bootstrap.v4alpha; + +import "envoy/config/cluster/v4alpha/cluster.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/config/core/v4alpha/socket_option.proto"; +import "envoy/config/listener/v3/listener.proto"; +import "envoy/config/metrics/v3/stats.proto"; +import "envoy/config/overload/v3/overload.proto"; +import "envoy/config/trace/v4alpha/trace.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/cert.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.bootstrap.v4alpha"; +option java_outer_classname = "BootstrapProto"; +option java_multiple_files = true; 
+option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + +// Bootstrap :ref:`configuration overview `. +// [#next-free-field: 22] +message Bootstrap { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Bootstrap"; + + message StaticResources { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; + + // Static :ref:`Listeners `. These listeners are + // available regardless of LDS configuration. + repeated listener.v3.Listener listeners = 1; + + // If a network based configuration source is specified for :ref:`cds_config + // `, it's necessary + // to have some initial cluster definitions available to allow Envoy to know + // how to speak to the management server. These cluster definitions may not + // use :ref:`EDS ` (i.e. they should be static + // IP or DNS-based). + repeated cluster.v4alpha.Cluster clusters = 2; + + // These static secrets can be used by :ref:`SdsSecretConfig + // ` + repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; + } + + message DynamicResources { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; + + reserved 4; + + // All :ref:`Listeners ` are provided by a single + // :ref:`LDS ` configuration source. + core.v4alpha.ConfigSource lds_config = 1; + + // All post-bootstrap :ref:`Cluster ` definitions are + // provided by a single :ref:`CDS ` + // configuration source. + core.v4alpha.ConfigSource cds_config = 2; + + // A single :ref:`ADS ` source may be optionally + // specified. This must have :ref:`api_type + // ` :ref:`GRPC + // `. 
Only + // :ref:`ConfigSources ` that have + // the :ref:`ads ` field set will be + // streamed on the ADS channel. + core.v4alpha.ApiConfigSource ads_config = 3; + } + + reserved 10, 11; + + reserved "runtime"; + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + core.v4alpha.Node node = 1; + + // Statically specified resources. + StaticResources static_resources = 2; + + // xDS configuration sources. + DynamicResources dynamic_resources = 3; + + // Configuration for the cluster manager which owns all upstream clusters + // within the server. + ClusterManager cluster_manager = 4; + + // Health discovery service config option. + // (:ref:`core.ApiConfigSource `) + core.v4alpha.ApiConfigSource hds_config = 14; + + // Optional file system path to search for startup flag files. + string flags_path = 5; + + // Optional set of stats sinks. + repeated metrics.v3.StatsSink stats_sinks = 6; + + // Configuration for internal processing of stats. + metrics.v3.StatsConfig stats_config = 13; + + // Optional duration between flushes to configured stats sinks. For + // performance reasons Envoy latches counters and only flushes counters and + // gauges at a periodic interval. If not specified the default is 5000ms (5 + // seconds). + // Duration must be at least 1ms and at most 5 min. + google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { + lt {seconds: 300} + gte {nanos: 1000000} + }]; + + // Optional watchdog configuration. + Watchdog watchdog = 8; + + // Configuration for an external tracing provider. If not specified, no + // tracing will be performed. + trace.v4alpha.Tracing tracing = 9; + + // Configuration for the runtime configuration provider. If not + // specified, a “null†provider will be used which will result in all defaults + // being used. + LayeredRuntime layered_runtime = 17; + + // Configuration for the local administration HTTP server. 
+ Admin admin = 12; + + // Optional overload manager configuration. + overload.v3.OverloadManager overload_manager = 15; + + // Enable :ref:`stats for event dispatcher `, defaults to false. + // Note that this records a value for each iteration of the event loop on every thread. This + // should normally be minimal overhead, but when using + // :ref:`statsd `, it will send each observed value + // over the wire individually because the statsd protocol doesn't have any way to represent a + // histogram summary. Be aware that this can be a very large volume of data. + bool enable_dispatcher_stats = 16; + + // Optional string which will be used in lieu of x-envoy in prefixing headers. + // + // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be + // transformed into x-foo-retry-on etc. + // + // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the + // headers Envoy will trust for core code and core extensions only. Be VERY careful making + // changes to this string, especially in multi-layer Envoy deployments or deployments using + // extensions which are not upstream. + string header_prefix = 18; + + // Optional proxy version which will be used to set the value of :ref:`server.version statistic + // ` if specified. Envoy will not process this value, it will be sent as is to + // :ref:`stats sinks `. + google.protobuf.UInt64Value stats_server_version_override = 19; + + // Always use TCP queries instead of UDP queries for DNS lookups. + // This may be overridden on a per-cluster basis in cds_config, + // when :ref:`dns_resolvers ` and + // :ref:`use_tcp_for_dns_lookups ` are + // specified. + bool use_tcp_for_dns_lookups = 20; + + // Configuration for an wasm service provider(s). + repeated wasm.v3.WasmService wasm_service = 21; +} + +// Administration interface :ref:`operations documentation +// `. 
+message Admin { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Admin"; + + // The path to write the access log for the administration server. If no + // access log is desired specify ‘/dev/null’. This is only required if + // :ref:`address ` is set. + string access_log_path = 1; + + // The cpu profiler output path for the administration server. If no profile + // path is specified, the default is ‘/var/log/envoy/envoy.prof’. + string profile_path = 2; + + // The TCP address that the administration server will listen on. + // If not specified, Envoy will not start an administration server. + core.v4alpha.Address address = 3; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.v4alpha.SocketOption socket_options = 4; +} + +// Cluster manager :ref:`architecture overview `. +message ClusterManager { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.ClusterManager"; + + message OutlierDetection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.ClusterManager.OutlierDetection"; + + // Specifies the path to the outlier event log. + string event_log_path = 1; + + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + core.v4alpha.EventServiceConfig event_service = 2; + } + + // Name of the local cluster (i.e., the cluster that owns the Envoy running + // this configuration). In order to enable :ref:`zone aware routing + // ` this option must be set. + // If *local_cluster_name* is defined then :ref:`clusters + // ` must be defined in the :ref:`Bootstrap + // static cluster resources + // `. This is unrelated to + // the :option:`--service-cluster` option which does not `affect zone aware + // routing `_. 
+ string local_cluster_name = 1; + + // Optional global configuration for outlier detection. + OutlierDetection outlier_detection = 2; + + // Optional configuration used to bind newly established upstream connections. + // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. + core.v4alpha.BindConfig upstream_bind_config = 3; + + // A management server endpoint to stream load stats to via + // *StreamLoadStats*. This must have :ref:`api_type + // ` :ref:`GRPC + // `. + core.v4alpha.ApiConfigSource load_stats_config = 4; +} + +// Envoy process watchdog configuration. When configured, this monitors for +// nonresponsive threads and kills the process after the configured thresholds. +// See the :ref:`watchdog documentation ` for more information. +message Watchdog { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; + + // The duration after which Envoy counts a nonresponsive thread in the + // *watchdog_miss* statistic. If not specified the default is 200ms. + google.protobuf.Duration miss_timeout = 1; + + // The duration after which Envoy counts a nonresponsive thread in the + // *watchdog_mega_miss* statistic. If not specified the default is + // 1000ms. + google.protobuf.Duration megamiss_timeout = 2; + + // If a watched thread has been nonresponsive for this duration, assume a + // programming error and kill the entire Envoy process. Set to 0 to disable + // kill behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration kill_timeout = 3; + + // If at least two watched threads have been nonresponsive for at least this + // duration assume a true deadlock and kill the entire Envoy process. Set to 0 + // to disable this behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration multikill_timeout = 4; +} + +// Runtime :ref:`configuration overview ` (deprecated). 
+message Runtime { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Runtime"; + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. Envoy + // will watch the location for changes and reload the file system tree when + // they happen. If this parameter is not set, there will be no disk based + // runtime. + string symlink_root = 1; + + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 2; + + // Specifies an optional subdirectory to load within the root directory. If + // specified and the directory exists, configuration values within this + // directory will override those found in the primary subdirectory. This is + // useful when Envoy is deployed across many different types of servers. + // Sometimes it is useful to have a per service cluster directory for runtime + // configuration. See below for exactly how the override directory is used. + string override_subdirectory = 3; + + // Static base runtime. This will be :ref:`overridden + // ` by other runtime layers, e.g. + // disk or admin. This follows the :ref:`runtime protobuf JSON representation + // encoding `. + google.protobuf.Struct base = 4; +} + +// [#next-free-field: 6] +message RuntimeLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer"; + + // :ref:`Disk runtime ` layer. + message DiskLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer"; + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. 
An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. + // Envoy will watch the location for changes and reload the file system tree + // when they happen. See documentation on runtime :ref:`atomicity + // ` for further details on how reloads are + // treated. + string symlink_root = 1; + + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 3; + + // :ref:`Append ` the + // service cluster to the path under symlink root. + bool append_service_cluster = 2; + } + + // :ref:`Admin console runtime ` layer. + message AdminLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer"; + } + + // :ref:`Runtime Discovery Service (RTDS) ` layer. + message RtdsLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; + + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // RTDS configuration source. + core.v4alpha.ConfigSource rtds_config = 2; + } + + // Descriptive name for the runtime layer. This is only used for the runtime + // :http:get:`/runtime` output. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof layer_specifier { + option (validate.required) = true; + + // :ref:`Static runtime ` layer. + // This follows the :ref:`runtime protobuf JSON representation encoding + // `. Unlike static xDS resources, this static + // layer is overridable by later layers in the runtime virtual filesystem. + google.protobuf.Struct static_layer = 2; + + DiskLayer disk_layer = 3; + + AdminLayer admin_layer = 4; + + RtdsLayer rtds_layer = 5; + } +} + +// Runtime :ref:`configuration overview `. 
+message LayeredRuntime { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.LayeredRuntime"; + + // The :ref:`layers ` of the runtime. This is ordered + // such that later layers in the list overlay earlier entries. + repeated RuntimeLayer layers = 1; +} diff --git a/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto b/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto index 8d70015100be..a0fdadd75724 100644 --- a/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto +++ b/api/envoy/config/cluster/aggregate/v2alpha/cluster.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.cluster.aggregate.v2alpha; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v2alpha"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.aggregate.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Aggregate cluster configuration] diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto index 24252699d846..33f5ffe057e3 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto @@ -5,6 +5,7 @@ package envoy.config.cluster.dynamic_forward_proxy.v2alpha; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; @@ -12,6 +13,7 @@ option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option 
(udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.dynamic_forward_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy cluster configuration] diff --git a/api/envoy/config/cluster/redis/BUILD b/api/envoy/config/cluster/redis/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/cluster/redis/BUILD +++ b/api/envoy/config/cluster/redis/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index f36345c337e6..b1872501e8eb 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -5,11 +5,13 @@ package envoy.config.cluster.redis; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.redis"; option java_outer_classname = "RedisClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Redis Cluster Configuration] // This cluster adds support for `Redis Cluster `_, as part diff --git a/api/envoy/config/cluster/v3/circuit_breaker.proto b/api/envoy/config/cluster/v3/circuit_breaker.proto index 42de29b01e5b..96e69701cda2 100644 --- a/api/envoy/config/cluster/v3/circuit_breaker.proto +++ b/api/envoy/config/cluster/v3/circuit_breaker.proto @@ -7,13 +7,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Circuit breakers] diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 5e7edd568263..06de8bbbead0 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -18,14 +18,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] @@ -373,7 +374,7 @@ message Cluster { } // Common configuration for all load balancer implementations. - // [#next-free-field: 7] + // [#next-free-field: 8] message CommonLbConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.CommonLbConfig"; @@ -411,6 +412,16 @@ message Cluster { "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; } + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; + + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. 
+ bool use_hostname_for_hashing = 1; + } + // Configures the :ref:`healthy panic threshold `. // If not specified, the default is 50%. // To disable panic mode, set to 0%. @@ -465,6 +476,9 @@ message Cluster { // If set to `true`, the cluster manager will drain all existing // connections to upstream hosts whenever hosts are added or removed from the cluster. bool close_connections_on_host_set_change = 6; + + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; } message RefreshRate { diff --git a/api/envoy/config/cluster/v3/filter.proto b/api/envoy/config/cluster/v3/filter.proto index 9ded0fbbb12b..af3116ec26eb 100644 --- a/api/envoy/config/cluster/v3/filter.proto +++ b/api/envoy/config/cluster/v3/filter.proto @@ -4,13 +4,14 @@ package envoy.config.cluster.v3; import "google/protobuf/any.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "FilterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Upstream filters] // Upstream filters apply to the connections to the upstream cluster hosts. 
diff --git a/api/envoy/config/cluster/v3/outlier_detection.proto b/api/envoy/config/cluster/v3/outlier_detection.proto index 1364b197f5cb..c0b4d5732db5 100644 --- a/api/envoy/config/cluster/v3/outlier_detection.proto +++ b/api/envoy/config/cluster/v3/outlier_detection.proto @@ -5,13 +5,14 @@ package envoy.config.cluster.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "OutlierDetectionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Outlier detection] diff --git a/api/envoy/config/cluster/v4alpha/BUILD b/api/envoy/config/cluster/v4alpha/BUILD new file mode 100644 index 000000000000..3aff84b82faa --- /dev/null +++ b/api/envoy/config/cluster/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/cluster/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/cluster/v4alpha/circuit_breaker.proto b/api/envoy/config/cluster/v4alpha/circuit_breaker.proto new file mode 100644 index 000000000000..57a263a70d2e --- /dev/null +++ b/api/envoy/config/cluster/v4alpha/circuit_breaker.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "CircuitBreakerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Circuit breakers] + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. +message CircuitBreakers { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.CircuitBreakers"; + + // A Thresholds defines CircuitBreaker settings for a + // :ref:`RoutingPriority`. 
+ // [#next-free-field: 9] + message Thresholds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.CircuitBreakers.Thresholds"; + + message RetryBudget { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget"; + + // Specifies the limit on concurrent retries as a percentage of the sum of active requests and + // active pending requests. For example, if there are 100 active requests and the + // budget_percent is set to 25, there may be 25 active retries. + // + // This parameter is optional. Defaults to 20%. + type.v3.Percent budget_percent = 1; + + // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the + // number of active retries may never go below this number. + // + // This parameter is optional. Defaults to 3. + google.protobuf.UInt32Value min_retry_concurrency = 2; + } + + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; + + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_connections = 2; + + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 3; + + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_requests = 4; + + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. + google.protobuf.UInt32Value max_retries = 5; + + // Specifies a limit on concurrent retries in relation to the number of active requests. This + // parameter is optional. + // + // .. 
note:: + // + // If this field is set, the retry budget will override any configured retry circuit + // breaker. + RetryBudget retry_budget = 8; + + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + // + // .. note:: + // + // If a retry budget is used in lieu of the max_retries circuit breaker, + // the remaining retry resources remaining will not be tracked. + bool track_remaining = 6; + + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. + google.protobuf.UInt32Value max_connection_pools = 7; + } + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. 
+ repeated Thresholds thresholds = 1; +} diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto new file mode 100644 index 000000000000..887ef9c3fe33 --- /dev/null +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -0,0 +1,873 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; +import "envoy/config/cluster/v4alpha/filter.proto"; +import "envoy/config/cluster/v4alpha/outlier_detection.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/endpoint/v3/endpoint.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "ClusterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Cluster configuration] + +// Configuration for a single upstream cluster. +// [#next-free-field: 48] +message Cluster { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; + + // Refer to :ref:`service discovery type ` + // for an explanation on each type. + enum DiscoveryType { + // Refer to the :ref:`static discovery type` + // for an explanation. + STATIC = 0; + + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. 
+ STRICT_DNS = 1; + + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + LOGICAL_DNS = 2; + + // Refer to the :ref:`service discovery type` + // for an explanation. + EDS = 3; + + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + ORIGINAL_DST = 4; + } + + // Refer to :ref:`load balancer type ` architecture + // overview section for information on each type. + enum LbPolicy { + reserved 4; + + reserved "ORIGINAL_DST_LB"; + + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + ROUND_ROBIN = 0; + + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + LEAST_REQUEST = 1; + + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + RING_HASH = 2; + + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + RANDOM = 3; + + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + MAGLEV = 5; + + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + CLUSTER_PROVIDED = 6; + + // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. + // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field + // and instead using the new load_balancing_policy field as the one and only mechanism for + // configuring this.] + LOAD_BALANCING_POLICY_CONFIG = 7; + } + + // When V4_ONLY is selected, the DNS resolver will only perform a lookup for + // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + // only perform a lookup for addresses in the IPv6 family. 
If AUTO is + // specified, the DNS resolver will first perform a lookup for addresses in + // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // For cluster types other than + // :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS`, + // this setting is + // ignored. + enum DnsLookupFamily { + AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; + } + + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; + + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } + + // TransportSocketMatch specifies what transport socket config will be used + // when the match conditions are satisfied. + message TransportSocketMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.TransportSocketMatch"; + + // The name of the match, used in stats generation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Optional endpoint metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match + // against the values specified in this field. + google.protobuf.Struct match = 2; + + // The configuration of the transport socket. + core.v4alpha.TransportSocket transport_socket = 3; + } + + // Extended cluster type. + message CustomClusterType { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CustomClusterType"; + + // The type of the cluster to instantiate. The name must match a supported cluster type. 
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.EdsClusterConfig"; + + // Configuration for the source of EDS updates for this Cluster. + core.v4alpha.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } + + // Optionally divide the endpoints in this cluster into subsets defined by + // endpoint metadata and selected by route and weighted cluster metadata. + // [#next-free-field: 8] + message LbSubsetConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.LbSubsetConfig"; + + // If NO_FALLBACK is selected, a result + // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + // any cluster endpoint may be returned (subject to policy, health checks, + // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + enum LbSubsetFallbackPolicy { + NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; + } + + // Specifications for subsets. + message LbSubsetSelector { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector"; + + // Allows to override top level fallback policy per selector. + enum LbSubsetSelectorFallbackPolicy { + // If NOT_DEFINED top level config fallback policy is used instead. + NOT_DEFINED = 0; + + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. 
+ NO_FALLBACK = 1; + + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + ANY_ENDPOINT = 2; + + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + DEFAULT_SUBSET = 3; + + // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + // keys reduced to + // :ref:`fallback_keys_subset`. + // It allows for a fallback to a different, less specific selector if some of the keys of + // the selector are considered optional. + KEYS_SUBSET = 4; + } + + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; + + // Subset of + // :ref:`keys` used by + // :ref:`KEYS_SUBSET` + // fallback policy. + // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + // For any other fallback policy the parameter is not used and should not be set. + // Only values also present in + // :ref:`keys` are allowed, but + // `fallback_keys_subset` cannot be equal to `keys`. + repeated string fallback_keys_subset = 3; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. 
+ google.protobuf.Struct default_subset = 2; + + // For each entry, LbEndpoint.Metadata's + // *envoy.lb* namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + repeated LbSubsetSelector subset_selectors = 3; + + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + bool locality_weight_aware = 4; + + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionately affected by the + // subset predicate. + bool scale_locality_weight = 5; + + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. 
+ // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. + bool panic_mode_any = 6; + + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + bool list_as_any = 7; + } + + // Specific configuration for the LeastRequest load balancing policy. + message LeastRequestLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.LeastRequestLbConfig"; + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + } + + // Specific configuration for the :ref:`RingHash` + // load balancing policy. + message RingHashLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.RingHashLbConfig"; + + // The hash function used to hash hosts onto the ketama ring. + enum HashFunction { + // Use `xxHash `_, this is the default hash function. + XX_HASH = 0; + + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + MURMUR_HASH_2 = 1; + } + + reserved 2; + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. 
+ google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; + + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; + } + + // Specific configuration for the + // :ref:`Original Destination ` + // load balancing policy. + message OriginalDstLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.OriginalDstLbConfig"; + + // When true, :ref:`x-envoy-original-dst-host + // ` can be used to override destination + // address. + // + // .. attention:: + // + // This header isn't sanitized by default, so enabling this feature allows HTTP clients to + // route traffic to arbitrary hosts and/or ports, which may have serious security + // consequences. + bool use_http_header = 1; + } + + // Common configuration for all load balancer implementations. + // [#next-free-field: 8] + message CommonLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig"; + + // Configuration for :ref:`zone aware routing + // `. + message ZoneAwareLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig"; + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. 
+ type.v3.Percent routing_enabled = 1; + + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + google.protobuf.UInt64Value min_cluster_size = 2; + + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + bool fail_traffic_on_panic = 3; + } + + // Configuration for :ref:`locality weighted load balancing + // ` + message LocalityWeightedLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; + } + + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; + + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + bool use_hostname_for_hashing = 1; + } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. 
+ type.v3.Percent healthy_panic_threshold = 1; + + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; + + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } + + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + google.protobuf.Duration update_merge_window = 4; + + // If set to true, Envoy will not consider new hosts when computing load balancing weights until + // they have been health checked for the first time. This will have no effect unless + // active health checking is also configured. + // + // Ignoring a host means that for any load balancing calculations that adjust weights based + // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and + // panic mode) Envoy will exclude these hosts in the denominator. + // + // For example, with hosts in two priorities P0 and P1, where P0 looks like + // {healthy, unhealthy (new), unhealthy (new)} + // and where P1 looks like + // {healthy, healthy} + // all traffic will still hit P0, as 1 / (3 - 2) = 1. 
+ // + // Enabling this will allow scaling up the number of hosts for a given cluster without entering + // panic mode or triggering priority spillover, assuming the hosts pass the first health check. + // + // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not + // contribute to the calculation when deciding whether panic mode is enabled or not. + bool ignore_new_hosts_until_first_hc = 5; + + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; + + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; + } + + message RefreshRate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.RefreshRate"; + + // Specifies the base interval between refreshes. This parameter is required and must be greater + // than zero and less than + // :ref:`max_interval `. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {nanos: 1000000} + }]; + + // Specifies the maximum interval between refreshes. This parameter is optional, but must be + // greater than or equal to the + // :ref:`base_interval ` if set. The default + // is 10 times the :ref:`base_interval `. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; + } + + reserved 12, 15, 7, 11, 35; + + reserved "hosts", "tls_context", "extension_protocol_options"; + + // Configuration to use different transport sockets for different endpoints. + // The entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata ` + // is used to match against the transport sockets as they appear in the list. The first + // :ref:`match ` is used. + // For example, with the following match + // + // .. 
code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: envoy.transport_sockets.raw_buffer + // + // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. + // + // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or + // *transport_socket* specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // *TransportSocketMatch* in this field. Other client Envoys receive CDS without + // *transport_socket_match* set, and still send plain text traffic to the same cluster. + // + // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + repeated TransportSocketMatch transport_socket_matches = 43; + + // Supplies the name of the cluster which must be unique across all clusters. 
+ // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes the *hosts* field in the v2 API. + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // + endpoint.v3.ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. 
If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.v4alpha.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + CircuitBreakers circuit_breakers = 10; + + // HTTP protocol options that are applied only to upstream HTTP connections. + // These options apply to all HTTP versions. + core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; + + // Additional options when handling HTTP requests upstream. These options will be applicable to + // both HTTP1 and HTTP2 requests. + core.v4alpha.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.v4alpha.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. 
+ map typed_extension_protocol_options = 36; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. The value configured must be at least 1ms. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; + + // If the DNS failure refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + // other than :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS` this setting is + // ignored. + RefreshRate dns_failure_refresh_rate = 44; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. 
+ repeated core.v4alpha.Address dns_resolvers = 18; + + // [#next-major-version: Reconcile DNS options in a single message.] + // Always use TCP queries instead of UDP queries for DNS lookups. + bool use_tcp_for_dns_lookups = 45; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.v4alpha.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. 
+ // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + + // Common configuration for all load balancer implementations. + CommonLbConfig common_lb_config = 27; + + // Optional custom transport socket implementation to use for upstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`UpstreamTlsContexts ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.v4alpha.TransportSocket transport_socket = 24; + + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as *envoy.filters.http.router*. + core.v4alpha.Metadata metadata = 25; + + // Determines how Envoy selects the protocol used to speak to upstream hosts. + ClusterProtocolSelection protocol_selection = 26; + + // Optional options for upstream connections. + UpstreamConnectionOptions upstream_connection_options = 30; + + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. 
+ // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. + bool close_connections_on_host_health_failure = 31; + + // If set to true, Envoy will ignore the health value of a host when processing its removal + // from service discovery. This means that if active health checking is used, Envoy will *not* + // wait for the endpoint to go unhealthy before removing it. + bool ignore_health_on_host_removal = 32; + + // An (optional) network filter chain, listed in the order the filters should be applied. + // The chain will be applied to all outgoing connections that Envoy makes to the upstream + // servers of this cluster. + repeated Filter filters = 40; + + // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the + // :ref:`lb_policy` field has the value + // :ref:`LOAD_BALANCING_POLICY_CONFIG`. + LoadBalancingPolicy load_balancing_policy = 41; + + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. 
+ // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.v4alpha.ConfigSource lrs_server = 42; + + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool track_timeout_budgets = 47; +} + +// [#not-implemented-hide:] Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. 
+// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +message LoadBalancingPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.LoadBalancingPolicy"; + + message Policy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.LoadBalancingPolicy.Policy"; + + reserved 2; + + reserved "config"; + + // Required. The name of the LB policy. + string name = 1; + + google.protobuf.Any typed_config = 3; + } + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. + repeated Policy policies = 1; +} + +// An extensible structure containing the address Envoy should bind to when +// establishing upstream connections. +message UpstreamBindConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.UpstreamBindConfig"; + + // The address Envoy should bind to when establishing upstream connections. + core.v4alpha.Address source_address = 1; +} + +message UpstreamConnectionOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.UpstreamConnectionOptions"; + + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. 
+ core.v4alpha.TcpKeepalive tcp_keepalive = 1; +} diff --git a/api/envoy/config/cluster/v4alpha/filter.proto b/api/envoy/config/cluster/v4alpha/filter.proto new file mode 100644 index 000000000000..eb825fdeb6d5 --- /dev/null +++ b/api/envoy/config/cluster/v4alpha/filter.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "FilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Upstream filters] +// Upstream filters apply to the connections to the upstream cluster hosts. + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Filter"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ google.protobuf.Any typed_config = 2; +} diff --git a/api/envoy/config/cluster/v4alpha/outlier_detection.proto b/api/envoy/config/cluster/v4alpha/outlier_detection.proto new file mode 100644 index 000000000000..29a1e01270d9 --- /dev/null +++ b/api/envoy/config/cluster/v4alpha/outlier_detection.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "OutlierDetectionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Outlier detection] + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +// [#next-free-field: 21] +message OutlierDetection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.OutlierDetection"; + + // The number of consecutive 5xx responses or local origin errors that are mapped + // to 5xx error codes before a consecutive 5xx ejection + // occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_5xx = 1; + + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. + google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; + + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected. + // Defaults to 30000ms or 30s. + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; + + // The maximum % of an upstream cluster that can be ejected due to outlier + // detection. 
Defaults to 10% but will eject at least one host regardless of the value. + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; + + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + google.protobuf.UInt32Value success_rate_minimum_hosts = 7; + + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. + google.protobuf.UInt32Value success_rate_request_volume = 8; + + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. 
That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + google.protobuf.UInt32Value success_rate_stdev_factor = 9; + + // The number of consecutive gateway failures (502, 503, 504 status codes) + // before a consecutive gateway failure ejection occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_gateway_failure = 10; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 + [(validate.rules).uint32 = {lte: 100}]; + + // Determines whether to distinguish local origin failures from external errors. If set to true + // the following configuration parameters are taken into account: + // :ref:`consecutive_local_origin_failure`, + // :ref:`enforcing_consecutive_local_origin_failure` + // and + // :ref:`enforcing_local_origin_success_rate`. + // Defaults to false. + bool split_external_local_origin_errors = 12; + + // The number of consecutive locally originated failures before ejection + // occurs. Defaults to 5. Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value consecutive_local_origin_failure = 13; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive locally originated failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. 
+ google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics for locally originated errors. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 + [(validate.rules).uint32 = {lte: 100}]; + + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given host is greater than or equal to this value, it will be + // ejected. Defaults to 85. + google.protobuf.UInt32Value failure_percentage_threshold = 16 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // failure percentage statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + // + // [#next-major-version: setting this without setting failure_percentage_threshold should be + // invalid in v4.] + google.protobuf.UInt32Value enforcing_failure_percentage = 17 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // local-origin failure percentage statistics. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 + [(validate.rules).uint32 = {lte: 100}]; + + // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. + // If the total number of hosts in the cluster is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. 
+ google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; + + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this host. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + google.protobuf.UInt32Value failure_percentage_request_volume = 20; +} diff --git a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto index d96dfc2c4c99..3941c20aeb80 100644 --- a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto +++ b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto @@ -8,6 +8,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha"; @@ -15,6 +16,7 @@ option java_outer_classname = "DnsCacheProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.dynamic_forward_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy common configuration] @@ -46,7 +48,12 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; + // + // .. note: + // + // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. + google.protobuf.Duration dns_refresh_rate = 3 + [(validate.rules).duration = {gte {nanos: 1000000}}]; // The TTL for hosts that are unused. 
Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. diff --git a/api/envoy/config/common/tap/v2alpha/common.proto b/api/envoy/config/common/tap/v2alpha/common.proto index 5751b78cabbf..262557b35623 100644 --- a/api/envoy/config/common/tap/v2alpha/common.proto +++ b/api/envoy/config/common/tap/v2alpha/common.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/config_source.proto"; import "envoy/service/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.tap.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common tap extension configuration] diff --git a/api/envoy/config/core/v3/address.proto b/api/envoy/config/core/v3/address.proto index 92649ff6ae49..5102c2d57591 100644 --- a/api/envoy/config/core/v3/address.proto +++ b/api/envoy/config/core/v3/address.proto @@ -6,13 +6,14 @@ import "envoy/config/core/v3/socket_option.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "AddressProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Network addresses] diff --git a/api/envoy/config/core/v3/backoff.proto b/api/envoy/config/core/v3/backoff.proto index 63fc868435ad..55b504e71657 100644 --- a/api/envoy/config/core/v3/backoff.proto +++ b/api/envoy/config/core/v3/backoff.proto @@ -4,13 +4,14 @@ package envoy.config.core.v3; import "google/protobuf/duration.proto"; 
+import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "BackoffProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Backoff Strategy] diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index ba1b5ded4329..b8ce5bff4bd5 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -13,13 +13,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "BaseProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common types] @@ -238,6 +239,17 @@ message RuntimeUInt32 { string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } +// Runtime derived double with a default when not specified. +message RuntimeDouble { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeDouble"; + + // Default value if runtime value is not available. + double default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + // Runtime derived bool with a default when not specified. 
message RuntimeFeatureFlag { option (udpa.annotations.versioning).previous_message_type = diff --git a/api/envoy/config/core/v3/config_source.proto b/api/envoy/config/core/v3/config_source.proto index 73dfe159824b..b56e06e6de4f 100644 --- a/api/envoy/config/core/v3/config_source.proto +++ b/api/envoy/config/core/v3/config_source.proto @@ -7,14 +7,15 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ConfigSourceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] @@ -143,6 +144,8 @@ message ConfigSource { option (validate.required) = true; // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. // // .. 
note:: // diff --git a/api/envoy/config/core/v3/event_service_config.proto b/api/envoy/config/core/v3/event_service_config.proto new file mode 100644 index 000000000000..b3552e3975a3 --- /dev/null +++ b/api/envoy/config/core/v3/event_service_config.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/grpc_service.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +message EventServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.EventServiceConfig"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. + GrpcService grpc_service = 1; + } +} diff --git a/api/envoy/config/core/v3/grpc_method_list.proto b/api/envoy/config/core/v3/grpc_method_list.proto new file mode 100644 index 000000000000..800d7b5332a0 --- /dev/null +++ b/api/envoy/config/core/v3/grpc_method_list.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. 
+message GrpcMethodList { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList"; + + message Service { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcMethodList.Service"; + + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The names of the gRPC methods in this service. + repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index a2d41a7232ae..8719652a6bbe 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -10,13 +10,14 @@ import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "GrpcServiceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC services] diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto index 624b7f6e5b15..52dda6f9b3c2 100644 --- a/api/envoy/config/core/v3/health_check.proto +++ b/api/envoy/config/core/v3/health_check.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/event_service_config.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -12,14 +13,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; 
- import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check] // * Health checking :ref:`architecture overview `. @@ -52,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 22] +// [#next-free-field: 23] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -83,7 +85,8 @@ message HealthCheck { // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. string host = 1; // Specifies the HTTP path that will be requested during health checking. For example @@ -164,7 +167,8 @@ message HealthCheck { // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. string authority = 2; } @@ -302,6 +306,11 @@ message HealthCheck { // If empty, no event log will be written. string event_log_path = 17; + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; + // If set to true, health check failure events will always be logged. If set to false, only the // initial health check failure event will be logged. 
// The default value is false. diff --git a/api/envoy/config/core/v3/http_uri.proto b/api/envoy/config/core/v3/http_uri.proto index 481ba9378570..42bcd4f61572 100644 --- a/api/envoy/config/core/v3/http_uri.proto +++ b/api/envoy/config/core/v3/http_uri.proto @@ -4,13 +4,14 @@ package envoy.config.core.v3; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Service URI ] diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 8d23ba229e45..400b0dd95a94 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -5,13 +5,14 @@ package envoy.config.core.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Protocol options] @@ -37,12 +38,33 @@ message UpstreamHttpProtocolOptions { bool auto_san_validation = 2; } +// [#next-free-field: 6] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpProtocolOptions"; + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. If not set, there is no idle timeout. When the + // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout @@ -73,6 +95,11 @@ message HttpProtocolOptions { // The current implementation implements this timeout on downstream connections only. // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. 
+ HeadersWithUnderscoresAction headers_with_underscores_action = 5; } // [#next-free-field: 6] diff --git a/api/envoy/config/core/v3/socket_option.proto b/api/envoy/config/core/v3/socket_option.proto index 0de7848aea0d..b22169b86aeb 100644 --- a/api/envoy/config/core/v3/socket_option.proto +++ b/api/envoy/config/core/v3/socket_option.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.config.core.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "SocketOptionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Socket Option ] diff --git a/api/envoy/config/core/v4alpha/BUILD b/api/envoy/config/core/v4alpha/BUILD new file mode 100644 index 000000000000..aeac38ac2833 --- /dev/null +++ b/api/envoy/config/core/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/core/v4alpha/address.proto b/api/envoy/config/core/v4alpha/address.proto new file mode 100644 index 000000000000..a2e6070103ae --- /dev/null +++ b/api/envoy/config/core/v4alpha/address.proto @@ -0,0 +1,145 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/socket_option.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "AddressProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Network addresses] + +message Pipe { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Pipe"; + + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The mode for the Pipe. Not applicable for abstract sockets. + uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; +} + +// [#next-free-field: 7] +message SocketAddress { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress"; + + enum Protocol { + TCP = 0; + UDP = 1; + } + + Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; + + // The address for this socket. 
:ref:`Listeners ` will bind + // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` + // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: + // It is possible to distinguish a Listener address via the prefix/suffix matching + // in :ref:`FilterChainMatch `.] When used + // within an upstream :ref:`BindConfig `, the address + // controls the source address of outbound connections. For :ref:`clusters + // `, the cluster type determines whether the + // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS + // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized + // via :ref:`resolver_name `. + string address = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof port_specifier { + option (validate.required) = true; + + uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; + + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + string named_port = 4; + } + + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; +} + +message TcpKeepalive { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.TcpKeepalive"; + + // Maximum number of keepalive probes to send without response before deciding + // the connection is dead. 
Default is to use the OS level configuration (unless + // overridden, Linux defaults to 9.) + google.protobuf.UInt32Value keepalive_probes = 1; + + // The number of seconds a connection needs to be idle before keep-alive probes + // start being sent. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 7200s (i.e., 2 hours.) + google.protobuf.UInt32Value keepalive_time = 2; + + // The number of seconds between keep-alive probes. Default is to use the OS + // level configuration (unless overridden, Linux defaults to 75s.) + google.protobuf.UInt32Value keepalive_interval = 3; +} + +message BindConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BindConfig"; + + // The address to bind to when creating a socket. + SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; + + // Whether to set the *IP_FREEBIND* option when creating the socket. When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option *IP_FREEBIND* is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + google.protobuf.BoolValue freebind = 2; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated SocketOption socket_options = 3; +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. 
+message Address { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Address"; + + oneof address { + option (validate.required) = true; + + SocketAddress socket_address = 1; + + Pipe pipe = 2; + } +} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. +message CidrRange { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange"; + + // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. + string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Length of prefix, e.g. 0, 32. + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; +} diff --git a/api/envoy/config/core/v4alpha/backoff.proto b/api/envoy/config/core/v4alpha/backoff.proto new file mode 100644 index 000000000000..07a2bdff175e --- /dev/null +++ b/api/envoy/config/core/v4alpha/backoff.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "BackoffProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Backoff Strategy] + +// Configuration defining a jittered exponential back off strategy. +message BackoffStrategy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.BackoffStrategy"; + + // The base interval to be used for the next back off computation. It should + // be greater than zero and less than or equal to :ref:`max_interval + // `. 
+ google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, + // but must be greater than or equal to the :ref:`base_interval + // ` if set. The default + // is 10 times the :ref:`base_interval + // `. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; +} diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto new file mode 100644 index 000000000000..dbc3c31e40e4 --- /dev/null +++ b/api/envoy/config/core/v4alpha/base.proto @@ -0,0 +1,421 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/backoff.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/v3/percent.proto"; +import "envoy/type/v3/semantic_version.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "BaseProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common types] + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. 
+enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; + PATCH = 9; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. + INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} + +// Identifies location of where either Envoy runs or where upstream hosts run. +message Locality { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Locality"; + + // Region this :ref:`zone ` belongs to. + string region = 1; + + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + string zone = 2; + + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + string sub_zone = 3; +} + +// BuildVersion combines SemVer version of extension with free-form build information +// (i.e. 'alpha', 'private-build') as a set of strings. +message BuildVersion { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BuildVersion"; + + // SemVer version of extension. + type.v3.SemanticVersion version = 1; + + // Free-form build information. + // Envoy defines several well known keys in the source/common/common/version.h file + google.protobuf.Struct metadata = 2; +} + +// Version and identification for an Envoy extension. 
+// [#next-free-field: 6] +message Extension { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Extension"; + + // This is the name of the Envoy filter as specified in the Envoy + // configuration, e.g. envoy.filters.http.router, com.acme.widget. + string name = 1; + + // Category of the extension. + // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" + // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from + // acme.com vendor. + // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] + string category = 2; + + // [#not-implemented-hide:] Type descriptor of extension configuration proto. + // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] + // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + string type_descriptor = 3; + + // The version is a property of the extension and maintained independently + // of other extensions and the Envoy API. + // This field is not set when extension did not provide version information. + BuildVersion version = 4; + + // Indicates that the extension is present but was disabled via dynamic configuration. + bool disabled = 5; +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. +// [#next-free-field: 12] +message Node { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Node"; + + reserved 5; + + reserved "build_version"; + + // An opaque node identifier for the Envoy node. This also provides the local + // service node name. It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. 
+ string id = 1; + + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification + // `, + // :ref:`runtime override directory `, + // :ref:`user agent addition + // `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. + string cluster = 2; + + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + google.protobuf.Struct metadata = 3; + + // Locality specifying where the Envoy instance is running. + Locality locality = 4; + + // Free-form string that identifies the entity requesting config. + // E.g. "envoy" or "grpc" + string user_agent_name = 6; + + oneof user_agent_version_type { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + string user_agent_version = 7; + + // Structured version of the entity requesting config. + BuildVersion user_agent_build_version = 8; + } + + // List of extensions and their versions supported by the node. + repeated Extension extensions = 9; + + // Client feature support list. These are well known features described + // in the Envoy API repository for a given major version of an API. Client features + // use reverse DNS naming scheme, for example `com.acme.feature`. + // See :ref:`the list of features ` that xDS client may + // support. + repeated string client_features = 10; + + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. 
+ repeated Address listening_addresses = 11; +} + +// Metadata provides additional inputs to filters based on matched listeners, +// filter chains, routes and endpoints. It is structured as a map, usually from +// filter name (in reverse DNS format) to metadata specific to the filter. Metadata +// key-values for a filter are merged as connection and request handling occurs, +// with later values for the same key overriding earlier values. +// +// An example use of metadata is providing additional values to +// http_connection_manager in the envoy.http_connection_manager.access_log +// namespace. +// +// Another example use of metadata is to per service config info in cluster metadata, which may get +// consumed by multiple filters. +// +// For load balancing, Metadata provides a means to subset cluster endpoints. +// Endpoints have a Metadata object associated and routes contain a Metadata +// object to match against. There are some well defined metadata used today for +// this purpose: +// +// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an +// endpoint and is also used during header processing +// (x-envoy-upstream-canary) and for stats purposes. +// [#next-major-version: move to type/metadata/v2] +message Metadata { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Metadata"; + + // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + // namespace is reserved for Envoy's built-in filters. + map filter_metadata = 1; +} + +// Runtime derived uint32 with a default when not specified. +message RuntimeUInt32 { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeUInt32"; + + // Default value if runtime value is not available. + uint32 default_value = 2; + + // Runtime key to get value for comparison. This value is used if defined. 
+ string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; +} + +// Runtime derived double with a default when not specified. +message RuntimeDouble { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeDouble"; + + // Default value if runtime value is not available. + double default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Runtime derived bool with a default when not specified. +message RuntimeFeatureFlag { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RuntimeFeatureFlag"; + + // Default value if runtime value is not available. + google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; + + // Runtime key to get value for comparison. This value is used if defined. The boolean value must + // be represented via its + // `canonical JSON encoding `_. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Header name/value pair. +message HeaderValue { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderValue"; + + // Header name. + string key = 1 + [(validate.rules).string = + {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of `-`. + string value = 2 [ + (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; +} + +// Header name/value pair plus option to control append behavior. +message HeaderValueOption { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HeaderValueOption"; + + // Header name/value pair that this option applies to. 
+ HeaderValue header = 1 [(validate.rules).message = {required: true}]; + + // Should the value be appended? If true (default), the value is appended to + // existing values. + google.protobuf.BoolValue append = 2; +} + +// Wrapper for a set of headers. +message HeaderMap { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderMap"; + + repeated HeaderValue headers = 1; +} + +// Data source consisting of either a file or an inline value. +message DataSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.DataSource"; + + oneof specifier { + option (validate.required) = true; + + // Local filesystem data source. + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Bytes inlined in the configuration. + bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; + + // String inlined in the configuration. + string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; + } +} + +// The message specifies the retry policy of remote data source when fetching fails. +message RetryPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RetryPolicy"; + + // Specifies parameters that control :ref:`retry backoff strategy `. + // This parameter is optional, in which case the default base interval is 1000 milliseconds. The + // default maximum interval is 10 times the base interval. + BackoffStrategy retry_back_off = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. + google.protobuf.UInt32Value num_retries = 2; +} + +// The message specifies how to fetch data from remote and how to verify it. +message RemoteDataSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RemoteDataSource"; + + // The HTTP URI to fetch the remote data. + HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; + + // SHA256 string for verifying data. 
+ string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Retry policy for fetching remote data. + RetryPolicy retry_policy = 3; +} + +// Async data source which support async data fetch. +message AsyncDataSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.AsyncDataSource"; + + oneof specifier { + option (validate.required) = true; + + // Local async data source. + DataSource local = 1; + + // Remote async data source. + RemoteDataSource remote = 2; + } +} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +message TransportSocket { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TransportSocket"; + + reserved 2; + + reserved "config"; + + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +// specified via a runtime key. +// +// .. note:: +// +// Parsing of the runtime key's data is implemented such that it may be represented as a +// :ref:`FractionalPercent ` proto represented as JSON/YAML +// and may also be represented as an integer with the assumption that the value is an integral +// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse +// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. 
+message RuntimeFractionalPercent { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RuntimeFractionalPercent"; + + // Default value if the runtime value's for the numerator/denominator keys are not available. + type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; + + // Runtime key for a YAML representation of a FractionalPercent. + string runtime_key = 2; +} + +// Identifies a specific ControlPlane instance that Envoy is connected to. +message ControlPlane { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ControlPlane"; + + // An opaque control plane identifier that uniquely identifies an instance + // of control plane. This can be used to identify which control plane instance, + // the Envoy is connected to. + string identifier = 1; +} diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto new file mode 100644 index 000000000000..be600bd0096e --- /dev/null +++ b/api/envoy/config/core/v4alpha/config_source.proto @@ -0,0 +1,197 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ConfigSourceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Configuration sources] + +// xDS API version. This is used to describe both resource and transport +// protocol versions (in distinct configuration fields). 
+enum ApiVersion { + // When not specified, we assume v2, to ease migration to Envoy's stable API + // versioning. If a client does not support v2 (e.g. due to deprecation), this + // is an invalid value. + AUTO = 0; + + // Use xDS v2 API. + V2 = 1; + + // Use xDS v3 API. + V3 = 2; +} + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. +// [#next-free-field: 9] +message ApiConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ApiConfigSource"; + + // APIs may be fetched via either REST or gRPC. + enum ApiType { + // Ideally this would be 'reserved 0' but one can't reserve the default + // value. Instead we throw an exception if this is ever used. + DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 + [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; + + // REST-JSON v2 API. The `canonical JSON encoding + // `_ for + // the v2 protos is used. + REST = 1; + + // gRPC v2 API. + GRPC = 2; + + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + // + // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. + // Do not use for other xDSes. + // [#comment:TODO(fredlas) update/remove this warning when appropriate.] + DELTA_GRPC = 3; + } + + // API type (gRPC, REST, delta gRPC) + ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; + + // Cluster names should be used only with REST. 
If > 1 + // cluster is defined, clusters will be cycled through if any kind of failure + // occurs. + // + // .. note:: + // + // The cluster with name ``cluster_name`` must be statically defined and its + // type must not be ``EDS``. + repeated string cluster_names = 2; + + // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + // services will be cycled through if any kind of failure occurs. + repeated GrpcService grpc_services = 4; + + // For REST APIs, the delay between successive polls. + google.protobuf.Duration refresh_delay = 3; + + // For REST APIs, the request timeout. If not set, a default value of 1s will be used. + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; + + // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + // rate limited. + RateLimitSettings rate_limit_settings = 6; + + // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + bool set_node_on_first_message_only = 7; +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. +message AggregatedConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.AggregatedConfigSource"; +} + +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +message SelfConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.SelfConfigSource"; +} + +// Rate Limit settings to be applied for discovery requests made by Envoy. 
+message RateLimitSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RateLimitSettings"; + + // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a + // default value of 100 will be used. + google.protobuf.UInt32Value max_tokens = 1; + + // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens + // per second will be used. + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. +// [#next-free-field: 7] +message ConfigSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. + string path = 1; + + // API configuration source. + ApiConfigSource api_config_source = 2; + + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. + AggregatedConfigSource ads = 3; + + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. 
This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] + SelfConfigSource self = 5; + } + + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + google.protobuf.Duration initial_fetch_timeout = 4; + + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. 
+ ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/config/core/v4alpha/event_service_config.proto b/api/envoy/config/core/v4alpha/event_service_config.proto new file mode 100644 index 000000000000..a0b4e5590d1d --- /dev/null +++ b/api/envoy/config/core/v4alpha/event_service_config.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +message EventServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.EventServiceConfig"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. 
+ GrpcService grpc_service = 1; + } +} diff --git a/api/envoy/config/core/v4alpha/grpc_method_list.proto b/api/envoy/config/core/v4alpha/grpc_method_list.proto new file mode 100644 index 000000000000..a4a7be077b27 --- /dev/null +++ b/api/envoy/config/core/v4alpha/grpc_method_list.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. +message GrpcMethodList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcMethodList"; + + message Service { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcMethodList.Service"; + + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The names of the gRPC methods in this service. 
+ repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto new file mode 100644 index 000000000000..64bbc6b5f077 --- /dev/null +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -0,0 +1,261 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "GrpcServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: gRPC services] + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +// [#next-free-field: 6] +message GrpcService { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService"; + + message EnvoyGrpc { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.EnvoyGrpc"; + + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`transport_socket + // `. + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // [#next-free-field: 7] + message GoogleGrpc { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc"; + + // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. 
+ message SslCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials"; + + // PEM encoded server root certificates. + DataSource root_certs = 1; + + // PEM encoded client private key. + DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // PEM encoded client certificate chain. + DataSource cert_chain = 3; + } + + // Local channel credentials. Only UDS is supported for now. + // See https://github.com/grpc/grpc/pull/15909. + message GoogleLocalCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials"; + } + + // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call + // credential types. + message ChannelCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials"; + + oneof credential_specifier { + option (validate.required) = true; + + SslCredentials ssl_credentials = 1; + + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_default = 2; + + GoogleLocalCredentials local_credentials = 3; + } + } + + // [#next-free-field: 8] + message CallCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials"; + + message ServiceAccountJWTAccessCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." 
+ "ServiceAccountJWTAccessCredentials"; + + string json_key = 1; + + uint64 token_lifetime_seconds = 2; + } + + message GoogleIAMCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; + + string authorization_token = 1; + + string authority_selector = 2; + } + + message MetadataCredentialsFromPlugin { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." + "MetadataCredentialsFromPlugin"; + + reserved 2; + + reserved "config"; + + string name = 1; + + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Security token service configuration that allows Google gRPC to + // fetch security token from an OAuth 2.0 authorization server. + // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and + // https://github.com/grpc/grpc/pull/19587. + // [#next-free-field: 10] + message StsService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService"; + + // URI of the token exchange service that handles token exchange requests. + // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by + // https://github.com/envoyproxy/protoc-gen-validate/issues/303] + string token_exchange_service_uri = 1; + + // Location of the target service or resource where the client + // intends to use the requested security token. + string resource = 2; + + // Logical name of the target service where the client intends to + // use the requested security token. + string audience = 3; + + // The desired scope of the requested security token in the + // context of the service or resource where the token will be used. + string scope = 4; + + // Type of the requested security token. 
+ string requested_token_type = 5; + + // The path of subject token, a security token that represents the + // identity of the party on behalf of whom the request is being made. + string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + + // Type of the subject token. + string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + + // The path of actor token, a security token that represents the identity + // of the acting party. The acting party is authorized to use the + // requested security token and act on behalf of the subject. + string actor_token_path = 8; + + // Type of the actor token. + string actor_token_type = 9; + } + + oneof credential_specifier { + option (validate.required) = true; + + // Access token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + string access_token = 1; + + // Google Compute Engine credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_compute_engine = 2; + + // Google refresh token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. + string google_refresh_token = 3; + + // Service Account JWT Access credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. + ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; + + // Google IAM credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. + GoogleIAMCredentials google_iam = 5; + + // Custom authenticator credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. + // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. + MetadataCredentialsFromPlugin from_plugin = 6; + + // Custom security token service which implements OAuth 2.0 token exchange. 
+ // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 + // See https://github.com/grpc/grpc/pull/19587. + StsService sts_service = 7; + } + } + + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + ChannelCredentials channel_credentials = 2; + + // A set of call credentials that can be composed with `channel credentials + // `_. + repeated CallCredentials call_credentials = 3; + + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; + + // The name of the Google gRPC credentials factory to use. This must have been registered with + // Envoy. If this is empty, a default credentials factory will be used that sets up channel + // credentials based on other configuration parameters. + string credentials_factory_name = 5; + + // Additional configuration for site-specific customizations of the Google + // gRPC library. + google.protobuf.Struct config = 6; + } + + reserved 4; + + oneof target_specifier { + option (validate.required) = true; + + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc envoy_grpc = 1; + + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc google_grpc = 2; + } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // Additional metadata to include in streams initiated to the GrpcService. 
+ // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + repeated HeaderValue initial_metadata = 5; +} diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto new file mode 100644 index 000000000000..0e6c4e73c2a2 --- /dev/null +++ b/api/envoy/config/core/v4alpha/health_check.proto @@ -0,0 +1,321 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/v3/http.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health check] +// * Health checking :ref:`architecture overview `. +// * If health checking is configured for a cluster, additional statistics are emitted. They are +// documented :ref:`here `. + +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + UNKNOWN = 0; + + // Healthy. + HEALTHY = 1; + + // Unhealthy. + UNHEALTHY = 2; + + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; + + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. 
+ TIMEOUT = 4; + + // Degraded. + DEGRADED = 5; +} + +// [#next-free-field: 23] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; + + // Describes the encoding of the payload bytes in the payload. + message Payload { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.Payload"; + + oneof payload { + option (validate.required) = true; + + // Hex encoded payload. E.g., "000000FF". + string text = 1 [(validate.rules).string = {min_bytes: 1}]; + + // [#not-implemented-hide:] Binary payload. + bytes binary = 2; + } + } + + // [#next-free-field: 12] + message HttpHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.HttpHealthCheck"; + + reserved 5, 7; + + reserved "service_name", "use_http2"; + + // The value of the host header in the HTTP health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. + string host = 1; + + // Specifies the HTTP path that will be requested during health checking. For example + // */healthcheck*. + string path = 2 [(validate.rules).string = {min_bytes: 1}]; + + // [#not-implemented-hide:] HTTP specific payload. + Payload send = 3; + + // [#not-implemented-hide:] HTTP specific response. + Payload receive = 4; + + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. For more information, including details on header value syntax, see + // the documentation on :ref:`custom request headers + // `. 
+ repeated HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request that is sent to the + // health checked cluster. + repeated string request_headers_to_remove = 8; + + // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. The start and end of each + // range are required. Only statuses in the range [100, 600) are allowed. + repeated type.v3.Int64Range expected_statuses = 9; + + // Use specified application protocol for health checks. + type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; + + // An optional service name parameter which is used to validate the identity of + // the health checked cluster using a :ref:`StringMatcher + // `. See the :ref:`architecture overview + // ` for more information. + type.matcher.v3.StringMatcher service_name_matcher = 11; + } + + message TcpHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.TcpHealthCheck"; + + // Empty payloads imply a connect-only health check. + Payload send = 1; + + // When checking the response, “fuzzy†matching is performed such that each + // binary block must be found, and in the order specified, but not + // necessarily contiguous. + repeated Payload receive = 2; + } + + message RedisHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.RedisHealthCheck"; + + // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + string key = 1; + } + + // `grpc.health.v1.Health + // `_-based + // healthcheck. See `gRPC doc `_ + // for details. + message GrpcHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.GrpcHealthCheck"; + + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + string service_name = 1; + + // The value of the :authority header in the gRPC health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. + string authority = 2; + } + + // Custom health check. + message CustomHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.CustomHealthCheck"; + + reserved 2; + + reserved "config"; + + // The registered name of the custom health checker. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Health checks occur over the transport socket specified for the cluster. This implies that if a + // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. + // + // This allows overriding the cluster TLS settings, just for health check connections. 
+ message TlsOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.TlsOptions"; + + // Specifies the ALPN protocols for health check connections. This is useful if the + // corresponding upstream is using ALPN-based :ref:`FilterChainMatch + // ` along with different protocols for health checks + // versus data connections. If empty, no ALPN protocols will be set on health check connections. + repeated string alpn_protocols = 1; + } + + reserved 10; + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add interval_ms * + // interval_jitter_percent / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. 
+ google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; + + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + + oneof health_checker { + option (validate.required) = true; + + // HTTP health check. + HttpHealthCheck http_health_check = 8; + + // TCP health check. + TcpHealthCheck tcp_health_check = 9; + + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // Custom health check. + CustomHealthCheck custom_health_check = 13; + } + + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". 
+ google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; + + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; + + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; + + // Specifies the path to the :ref:`health check event log `. + // If empty, no event log will be written. + string event_log_path = 17; + + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; + + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + bool always_log_health_check_failures = 19; + + // This allows overriding the cluster TLS settings, just for health check connections. 
+ TlsOptions tls_options = 21; +} diff --git a/api/envoy/config/core/v4alpha/http_uri.proto b/api/envoy/config/core/v4alpha/http_uri.proto new file mode 100644 index 000000000000..e88a9aa7d7df --- /dev/null +++ b/api/envoy/config/core/v4alpha/http_uri.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "HttpUriProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP Service URI ] + +// Envoy external URI descriptor +message HttpUri { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpUri"; + + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + // + string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify how `uri` is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. + oneof http_upstream_type { + option (validate.required) = true; + + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
+ google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; +} diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto new file mode 100644 index 000000000000..dcb205444524 --- /dev/null +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -0,0 +1,323 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Protocol options] + +// [#not-implemented-hide:] +message TcpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TcpProtocolOptions"; +} + +message UpstreamHttpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.UpstreamHttpProtocolOptions"; + + // Set transport socket `SNI `_ for new + // upstream connections based on the downstream HTTP host/authority header, as seen by the + // :ref:`router filter `. + bool auto_sni = 1; + + // Automatic validate upstream presented certificate for new upstream connections based on the + // downstream HTTP host/authority header, as seen by the + // :ref:`router filter `. + // This field is intended to set with `auto_sni` field. + bool auto_san_validation = 2; +} + +// [#next-free-field: 6] +message HttpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HttpProtocolOptions"; + + // Action to take when Envoy receives client request with header names containing underscore + // characters. 
+ // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + + // The idle timeout for connections. The idle timeout is defined as the + // period in which there are no active requests. When the + // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 + // downstream connection a drain sequence will occur prior to closing the connection, see + // :ref:`drain_timeout + // `. + // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + // + // .. warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + google.protobuf.Duration idle_timeout = 1; + + // The maximum duration of a connection. The duration is defined as a period since a connection + // was established. If not set, there is no max duration. When max_connection_duration is reached + // the connection will be closed. Drain sequence will occur prior to closing the connection if + // if's applicable. 
See :ref:`drain_timeout + // `. + // Note: not implemented for upstream connections. + google.protobuf.Duration max_connection_duration = 3; + + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + // reset independent of any other timeouts. If not specified, this value is not set. + // The current implementation implements this timeout on downstream connections only. + // [#comment:TODO(shikugawa): add this functionality to upstream.] + google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. + HeadersWithUnderscoresAction headers_with_underscores_action = 5; +} + +// [#next-free-field: 6] +message Http1ProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http1ProtocolOptions"; + + message HeaderKeyFormat { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat"; + + message ProperCaseWords { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; + } + + oneof header_format { + option (validate.required) = true; + + // Formats the header by proper casing words: the first character and any character following + // a special character will be capitalized if it's an alpha character. 
For example, + // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + // Note that while this results in most headers following conventional casing, certain headers + // are not covered. For example, the "TE" header will be formatted as "Te". + ProperCaseWords proper_case_words = 1; + } + } + + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + // *http_proxy* environment variable. + google.protobuf.BoolValue allow_absolute_url = 1; + + // Handle incoming HTTP/1.0 and HTTP 0.9 requests. + // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // *default_host_for_http_10* is configured. + bool accept_http_10 = 2; + + // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. + // This is a no-op if *accept_http_10* is not true. + string default_host_for_http_10 = 3; + + // Describes how the keys for response headers should be formatted. By default, all header keys + // are lower cased. + HeaderKeyFormat header_key_format = 4; + + // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + // + // .. attention:: + // + // Note that this only happens when Envoy is chunk encoding which occurs when: + // - The request is HTTP/1.1. + // - Is neither a HEAD only request nor a HTTP Upgrade. + // - Not a response to a HEAD request. + // - The content length header is not present. 
+ bool enable_trailers = 5; +} + +// [#next-free-field: 14] +message Http2ProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http2ProtocolOptions"; + + // Defines a parameter to be sent in the SETTINGS frame. + // See `RFC7540, sec. 6.5.1 `_ for details. + message SettingsParameter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter"; + + // The 16 bit parameter identifier. + google.protobuf.UInt32Value identifier = 1 [ + (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).message = {required: true} + ]; + + // The 32 bit parameter value. + google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; + } + + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header + // compression. + google.protobuf.UInt32Value hpack_table_size = 1; + + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) + // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). + google.protobuf.UInt32Value max_concurrent_streams = 2 + [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; + + // `Initial stream-level flow-control window + // `_ size. Valid values range from 65535 + // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + // (256 * 1024 * 1024). + // + // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default + // window size now, so it's also the minimum. 
+ // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. + google.protobuf.UInt32Value initial_stream_window_size = 3 + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; + + // Similar to *initial_stream_window_size*, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. + google.protobuf.UInt32Value initial_connection_window_size = 4 + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; + + // Allows proxying Websocket and other upgrades over H2 connect. + bool allow_connect = 5; + + // [#not-implemented-hide:] Hiding until envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows metadata. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more + // information. + bool allow_metadata = 6; + + // Limit the number of pending outbound downstream frames of all types (frames that are waiting to + // be written into the socket). Exceeding this limit triggers flood mitigation and connection is + // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due + // to flood mitigation. The default limit is 10000. + // [#comment:TODO: implement same limits for upstream outbound frames as well.] + google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, + // preventing high memory utilization when receiving continuous stream of these frames. Exceeding + // this limit triggers flood mitigation and connection is terminated. 
The + // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood + // mitigation. The default limit is 1000. + // [#comment:TODO: implement same limits for upstream outbound frames as well.] + google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an + // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but + // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` + // stat tracks the number of connections terminated due to flood mitigation. + // Setting this to 0 will terminate connection upon receiving first frame with an empty payload + // and no end stream flag. The default limit is 1. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; + + // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number + // of PRIORITY frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // max_inbound_priority_frames_per_stream * (1 + inbound_streams) + // + // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 100. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; + + // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. 
If the number + // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // 1 + 2 * (inbound_streams + + // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) + // + // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 10. + // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, + // but more complex implementations that try to estimate available bandwidth require at least 2. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 + [(validate.rules).uint32 = {gte: 1}]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // See `RFC7540, sec. 8.1 `_ for details. + bool stream_error_on_invalid_http_messaging = 12; + + // [#not-implemented-hide:] + // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: + // + // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by + // Envoy. + // + // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field + // 'allow_connect'. + // + // Note that custom parameters specified through this field can not also be set in the + // corresponding named parameters: + // + // .. code-block:: text + // + // ID Field Name + // ---------------- + // 0x1 hpack_table_size + // 0x3 max_concurrent_streams + // 0x4 initial_stream_window_size + // + // Collisions will trigger config validation failure on load/update. 
Likewise, inconsistencies + // between custom parameters with the same identifier will trigger a failure. + // + // See `IANA HTTP/2 Settings + // `_ for + // standardized identifiers. + repeated SettingsParameter custom_settings_parameters = 13; +} + +// [#not-implemented-hide:] +message GrpcProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcProtocolOptions"; + + Http2ProtocolOptions http2_protocol_options = 1; +} diff --git a/api/envoy/config/core/v4alpha/socket_option.proto b/api/envoy/config/core/v4alpha/socket_option.proto new file mode 100644 index 000000000000..7dac394a865d --- /dev/null +++ b/api/envoy/config/core/v4alpha/socket_option.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "SocketOptionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Socket Option ] + +// Generic socket option message. This would be used to set socket options that +// might not exist in upstream kernels or precompiled Envoy binaries. +// [#next-free-field: 7] +message SocketOption { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketOption"; + + enum SocketState { + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + + // An optional name to give this socket option for debugging, etc. + // Uniqueness is not required and no special meaning is assumed. 
+ string description = 1; + + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + int64 level = 2; + + // The numeric name as passed to setsockopt + int64 name = 3; + + oneof value { + option (validate.required) = true; + + // Because many sockopts take an int value. + int64 int_value = 4; + + // Otherwise it's a byte buffer. + bytes buf_value = 5; + } + + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. + SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/config/endpoint/v3/endpoint.proto b/api/envoy/config/endpoint/v3/endpoint.proto index 3b521e37aee3..008b4ddc4993 100644 --- a/api/envoy/config/endpoint/v3/endpoint.proto +++ b/api/envoy/config/endpoint/v3/endpoint.proto @@ -9,13 +9,14 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Endpoint configuration] // Endpoint discovery :ref:`architecture overview ` diff --git a/api/envoy/config/endpoint/v3/endpoint_components.proto b/api/envoy/config/endpoint/v3/endpoint_components.proto index 2bb219151efd..ce7048b5baca 100644 --- a/api/envoy/config/endpoint/v3/endpoint_components.proto +++ b/api/envoy/config/endpoint/v3/endpoint_components.proto @@ -8,13 +8,14 @@ import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; option 
java_outer_classname = "EndpointComponentsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Endpoints] @@ -34,6 +35,13 @@ message Endpoint { // check port. Setting this with a non-zero value allows an upstream host // to have different health check address port. uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + string hostname = 2; } // The upstream host address. @@ -55,6 +63,12 @@ message Endpoint { // This takes into effect only for upstream clusters with // :ref:`active health checking ` enabled. HealthCheckConfig health_check_config = 2; + + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. + string hostname = 3; } // An Endpoint that Envoy can route traffic to. 
diff --git a/api/envoy/config/endpoint/v3/load_report.proto b/api/envoy/config/endpoint/v3/load_report.proto index 2f0454d94320..01eb7b12cf1a 100644 --- a/api/envoy/config/endpoint/v3/load_report.proto +++ b/api/envoy/config/endpoint/v3/load_report.proto @@ -8,13 +8,14 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // These are stats Envoy reports to GLB every so often. Report frequency is // defined by diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto index 8a525dee9108..25d27bfbd106 100644 --- a/api/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto @@ -10,12 +10,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.accesslog.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common access log types] diff --git a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto index 19f7697a68f1..2e35bb7f7c5b 100644 --- a/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto +++ b/api/envoy/config/filter/dubbo/router/v2alpha1/router.proto @@ -3,12 +3,14 @@ 
syntax = "proto3"; package envoy.config.filter.dubbo.router.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.router.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Router] // Dubbo router :ref:`configuration overview `. diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto index d0d12c07a64d..016140d10f84 100644 --- a/api/envoy/config/filter/fault/v2/fault.proto +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -8,12 +8,14 @@ import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.fault.v2"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.common.fault.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common fault injection types] diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto index 98465ab97336..bd9da5a67766 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto +++ b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto @@ -10,6 +10,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import 
"udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha"; @@ -17,6 +18,7 @@ option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.adaptive_concurrency.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview diff --git a/api/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto b/api/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto index cd9e1d30e887..43823286286a 100644 --- a/api/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto +++ b/api/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto @@ -2,9 +2,8 @@ syntax = "proto3"; package envoy.config.filter.http.aws_lambda.v2alpha; -import "udpa/annotations/status.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha"; @@ -13,6 +12,7 @@ option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.aws_lambda.v3"; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AWS Lambda] // AWS Lambda :ref:`configuration overview `. @@ -20,6 +20,17 @@ option (udpa.annotations.file_status).work_in_progress = true; // AWS Lambda filter config message Config { + enum InvocationMode { + // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In + // this mode the output of the Lambda function becomes the response of the HTTP request. 
+ SYNCHRONOUS = 0; + + // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be + // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the + // call which is translated to an HTTP 200 OK by the filter. + ASYNCHRONOUS = 1; + } + // The ARN of the AWS Lambda to invoke when the filter is engaged // Must be in the following format: // arn::lambda:::function: @@ -27,6 +38,9 @@ message Config { // Whether to transform the request (headers and body) to a JSON payload or pass it as is. bool payload_passthrough = 2; + + // Determines the way to invoke the Lambda function. + InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; } // Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different diff --git a/api/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto b/api/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto index 40e0bd9fcc69..5ebb92c01dfa 100644 --- a/api/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto +++ b/api/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.aws_request_signing.v2alpha; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha"; @@ -10,6 +11,7 @@ option java_outer_classname = "AwsRequestSigningProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.aws_request_signing.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AwsRequestSigning] // AwsRequestSigning :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto index 00e0116a926d..56961d22fe09 100644 --- a/api/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -5,12 +5,14 @@ package envoy.config.filter.http.buffer.v2; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; option java_outer_classname = "BufferProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.buffer.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Buffer] // Buffer :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/cache/v2alpha/cache.proto b/api/envoy/config/filter/http/cache/v2alpha/cache.proto index 4005b32a55c4..a9e51cf56a10 100644 --- a/api/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/api/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -7,9 +7,8 @@ import "envoy/type/matcher/string.proto"; import "google/protobuf/any.proto"; -import "udpa/annotations/status.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.cache.v2alpha"; @@ -18,6 +17,7 @@ option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cache.v3alpha"; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Cache Filter] // [#extension: envoy.filters.http.cache] diff --git a/api/envoy/config/filter/http/compressor/v2/compressor.proto 
b/api/envoy/config/filter/http/compressor/v2/compressor.proto index 54814f982073..d62d0d7a42fa 100644 --- a/api/envoy/config/filter/http/compressor/v2/compressor.proto +++ b/api/envoy/config/filter/http/compressor/v2/compressor.proto @@ -7,12 +7,14 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.compressor.v2"; option java_outer_classname = "CompressorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.compressor.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Compressor] diff --git a/api/envoy/config/filter/http/cors/v2/cors.proto b/api/envoy/config/filter/http/cors/v2/cors.proto index 73c9efc62544..9060a9c38fda 100644 --- a/api/envoy/config/filter/http/cors/v2/cors.proto +++ b/api/envoy/config/filter/http/cors/v2/cors.proto @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.config.filter.http.cors.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.cors.v2"; option java_outer_classname = "CorsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cors.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Cors] // CORS Filter :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto index ce38714bf45f..3c2c9110e9fe 100644 --- a/api/envoy/config/filter/http/csrf/v2/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/string.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.csrf.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 17509a111415..29aa8380191b 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -5,6 +5,7 @@ package envoy.config.filter.http.dynamic_forward_proxy.v2alpha; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha"; @@ -12,6 +13,7 @@ option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamic_forward_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward 
proxy] diff --git a/api/envoy/config/filter/http/dynamo/v2/dynamo.proto b/api/envoy/config/filter/http/dynamo/v2/dynamo.proto index 8de88a959209..011d22f768c8 100644 --- a/api/envoy/config/filter/http/dynamo/v2/dynamo.proto +++ b/api/envoy/config/filter/http/dynamo/v2/dynamo.proto @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.config.filter.http.dynamo.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamo.v2"; option java_outer_classname = "DynamoProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamo.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamo] // Dynamo :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index a67e4dd6087a..a407f4628d2e 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -10,6 +10,7 @@ import "envoy/type/matcher/string.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; @@ -17,6 +18,7 @@ option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ext_authz.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index 9ce49288076f..cb99b0d71bbd 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -9,12 +9,14 @@ import "envoy/type/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.fault.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto b/api/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto index d1ba0b628987..b4331dad5031 100644 --- a/api/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto +++ b/api/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_http1_bridge.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_http1_bridge.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC HTTP/1.1 Bridge] // gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto index 6869b316b5e2..8b916d327e19 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] // gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview diff --git a/api/envoy/config/filter/http/grpc_stats/v2alpha/BUILD b/api/envoy/config/filter/http/grpc_stats/v2alpha/BUILD index ef3541ebcb1d..69168ad0cf24 100644 --- a/api/envoy/config/filter/http/grpc_stats/v2alpha/BUILD +++ b/api/envoy/config/filter/http/grpc_stats/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto b/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto index ccfb6e50dab7..7f6dd2ce4226 100644 --- a/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto +++ b/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto @@ -2,7 +2,12 @@ 
syntax = "proto3"; package envoy.config.filter.http.grpc_stats.v2alpha; +import "envoy/api/v2/core/grpc_method_list.proto"; + +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha"; @@ -10,6 +15,7 @@ option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_stats.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. @@ -20,6 +26,33 @@ message FilterConfig { // If true, the filter maintains a filter state object with the request and response message // counts. bool emit_filter_state = 1; + + oneof per_method_stat_specifier { + // If set, specifies an allowlist of service/methods that will have individual stats + // emitted for them. Any call that does not match the allowlist will be counted + // in a stat with no method specifier: `cluster..grpc.*`. + api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2; + + // If set to true, emit stats for all service/method names. + // + // If set to false, emit stats for all service/message types to the same stats without including + // the service/method in the name, with prefix `cluster..grpc`. This can be useful if + // service/method granularity is not needed, or if each cluster only receives a single method. + // + // .. attention:: + // This option is only safe if all clients are trusted. If this option is enabled + // with untrusted clients, the clients could cause unbounded growth in the number of stats in + // Envoy, using unbounded memory and potentially slowing down stats pipelines. + // + // .. 
attention:: + // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the + // behavior will default to `stats_for_all_methods=true`. This default value is deprecated, + // and in a future release, if neither field is set, it will default to + // `stats_for_all_methods=false` in order to be safe by default. This behavior can be + // controlled with runtime override + // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. + google.protobuf.BoolValue stats_for_all_methods = 3; + } } // gRPC statistics filter state object in protobuf form. diff --git a/api/envoy/config/filter/http/grpc_web/v2/grpc_web.proto b/api/envoy/config/filter/http/grpc_web/v2/grpc_web.proto index 42cd3a13f842..be23b4d87b58 100644 --- a/api/envoy/config/filter/http/grpc_web/v2/grpc_web.proto +++ b/api/envoy/config/filter/http/grpc_web/v2/grpc_web.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_web.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_web.v2"; option java_outer_classname = "GrpcWebProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_web.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Web] // gRPC Web :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto index 85fe6fbdc1d1..f3601b612b02 100644 --- a/api/envoy/config/filter/http/gzip/v2/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto @@ -7,12 +7,14 @@ import "envoy/config/filter/http/compressor/v2/compressor.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.gzip.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Gzip] // Gzip :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto index 0ef96f17cc21..30de69d98b1c 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.header_to_metadata.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2"; @@ -10,6 +11,7 @@ option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.header_to_metadata.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Header-To-Metadata Filter] // @@ -69,7 +71,8 @@ message Config { // A Rule defines what metadata to apply when a header is present or 
missing. message Rule { // The header that triggers this rule — required. - string header = 1 [(validate.rules).string = {min_bytes: 1}]; + string header = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If the header is present, apply this metadata KeyValuePair. // diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index e57cd0893112..d7f6da8c82d4 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -9,6 +9,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; @@ -16,6 +17,7 @@ option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.health_check.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index 87582ab554f5..f99b18a12c71 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -5,6 +5,7 @@ package envoy.config.filter.http.ip_tagging.v2; import "envoy/api/v2/core/address.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2"; @@ -12,6 +13,7 @@ option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ip_tagging.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index 3f40f89cdbcc..07044f92201e 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -10,6 +10,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha"; @@ -17,6 +18,7 @@ option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.jwt_authn.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto index c10cbe170cd4..068b5e255df5 100644 --- a/api/envoy/config/filter/http/lua/v2/lua.proto +++ b/api/envoy/config/filter/http/lua/v2/lua.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.http.lua.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2"; option java_outer_classname = "LuaProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.lua.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Lua] // Lua :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/on_demand/v2/on_demand.proto b/api/envoy/config/filter/http/on_demand/v2/on_demand.proto index 2ace47716f13..74d0ee408aeb 100644 --- a/api/envoy/config/filter/http/on_demand/v2/on_demand.proto +++ b/api/envoy/config/filter/http/on_demand/v2/on_demand.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.on_demand.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.on_demand.v2"; @@ -10,6 +11,7 @@ option java_outer_classname = "OnDemandProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.on_demand.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: OnDemand] // IP tagging :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto index 5f772436ad48..0baf49cebeef 100644 --- a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.original_src.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.original_src.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index 027cc8e7af03..b9361476bcfd 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -7,6 +7,7 @@ import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2"; @@ -14,6 +15,7 @@ option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/rbac/v2/rbac.proto b/api/envoy/config/filter/http/rbac/v2/rbac.proto index 5f3292d41d50..691f23036ba8 100644 --- a/api/envoy/config/filter/http/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v2/rbac.proto @@ -5,12 +5,14 @@ package envoy.config.filter.http.rbac.v2; import "envoy/config/rbac/v2/rbac.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.rbac.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto index a94641cf33a5..c95500cf8168 100644 --- a/api/envoy/config/filter/http/router/v2/router.proto +++ b/api/envoy/config/filter/http/router/v2/router.proto @@ -7,12 +7,14 @@ import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.router.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Router] // Router :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto index ae159423d86b..a7ae625d2ee3 100644 --- a/api/envoy/config/filter/http/squash/v2/squash.proto +++ b/api/envoy/config/filter/http/squash/v2/squash.proto @@ -6,12 +6,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2"; option java_outer_classname = "SquashProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.squash.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Squash] // Squash :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/tap/v2alpha/tap.proto b/api/envoy/config/filter/http/tap/v2alpha/tap.proto index 840082448454..3f984cec0d6c 100644 --- a/api/envoy/config/filter/http/tap/v2alpha/tap.proto +++ b/api/envoy/config/filter/http/tap/v2alpha/tap.proto @@ -5,12 +5,14 @@ package envoy.config.filter.http.tap.v2alpha; import "envoy/config/common/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.tap.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto index c7636652c520..ac6d7eefa78a 100644 --- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.transcoder.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2"; @@ -10,6 +11,7 @@ option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_json_transcoder.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder :ref:`configuration overview `. diff --git a/api/envoy/config/filter/http/wasm/v2/wasm.proto b/api/envoy/config/filter/http/wasm/v2/wasm.proto deleted file mode 100644 index 001dda83a1ae..000000000000 --- a/api/envoy/config/filter/http/wasm/v2/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.wasm.v2; - -import "envoy/config/wasm/v2/wasm.proto"; - -import "udpa/annotations/migrate.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.wasm.v2"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.wasm.v3"; - -// [#protodoc-title: Wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. 
- config.wasm.v2.PluginConfig config = 1; -} diff --git a/api/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto b/api/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto index 0e87ff300968..0496207e09bc 100644 --- a/api/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto +++ b/api/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.http_inspector.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.http_inspector.v2"; option java_outer_classname = "HttpInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.http_inspector.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Inspector Filter] // Detect whether the application protocol is HTTP. diff --git a/api/envoy/config/filter/listener/original_dst/v2/original_dst.proto b/api/envoy/config/filter/listener/original_dst/v2/original_dst.proto index ab210ad4805d..fa4acee45fc1 100644 --- a/api/envoy/config/filter/listener/original_dst/v2/original_dst.proto +++ b/api/envoy/config/filter/listener/original_dst/v2/original_dst.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.original_dst.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_dst.v2"; option java_outer_classname = "OriginalDstProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.original_dst.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Dst Filter] // Use the Original destination address on downstream connections. 
diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto index 227a8a2572d2..1959698fd100 100644 --- a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.listener.original_src.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.original_src.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. 
diff --git a/api/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto b/api/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto index 4749434f98dc..cabffb9fc0c0 100644 --- a/api/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto +++ b/api/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.proxy_protocol.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2"; option java_outer_classname = "ProxyProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.proxy_protocol.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Proxy Protocol Filter] // PROXY protocol listener filter. diff --git a/api/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto b/api/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto index fbdcc74a2a5b..7ab679c47dc5 100644 --- a/api/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto +++ b/api/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.tls_inspector.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2"; option java_outer_classname = "TlsInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.tls_inspector.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TLS Inspector Filter] // Allows detecting whether the transport appears to be TLS or plaintext. 
diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index 68ef0c91a82d..d1f459078f20 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -7,6 +7,7 @@ import "envoy/api/v2/core/address.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; @@ -14,6 +15,7 @@ option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.client_ssl_auth.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Client TLS authentication] // Client TLS authentication diff --git a/api/envoy/config/filter/network/direct_response/v2/config.proto b/api/envoy/config/filter/network/direct_response/v2/config.proto index 758145d6480f..15de7e3b5537 100644 --- a/api/envoy/config/filter/network/direct_response/v2/config.proto +++ b/api/envoy/config/filter/network/direct_response/v2/config.proto @@ -5,12 +5,14 @@ package envoy.config.filter.network.direct_response.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.direct_response.v2"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.direct_response.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Direct response] // Direct response :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index 3ede3eca22d4..47248932f94c 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -7,6 +7,7 @@ import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; @@ -14,6 +15,7 @@ option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index a760309a160a..9af461e3577c 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -7,6 +7,7 @@ import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; @@ -14,6 +15,7 @@ option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. diff --git a/api/envoy/config/filter/network/echo/v2/echo.proto b/api/envoy/config/filter/network/echo/v2/echo.proto index bbf554b98898..2b51ce4e18c3 100644 --- a/api/envoy/config/filter/network/echo/v2/echo.proto +++ b/api/envoy/config/filter/network/echo/v2/echo.proto @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.config.filter.network.echo.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.echo.v2"; option java_outer_classname = "EchoProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.echo.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Echo] // Echo :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index 8d31231a3de1..40cea7061868 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -5,6 +5,7 @@ package envoy.config.filter.network.ext_authz.v2; import "envoy/api/v2/core/grpc_service.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2"; @@ -12,6 +13,7 @@ option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.ext_authz.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 35fd122c06b1..3b4c29066e9a 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -18,6 +18,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; @@ -25,12 +26,13 @@ option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.http_connection_manager.v3"; +option 
(udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 36] +// [#next-free-field: 37] message HttpConnectionManager { enum CodecType { // For every new connection, the connection manager will determine which @@ -334,7 +336,7 @@ message HttpConnectionManager { // timeout, although per-route idle timeout overrides will continue to apply. google.protobuf.Duration stream_idle_timeout = 24; - // A timeout for idle requests managed by the connection manager. + // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. @@ -491,6 +493,18 @@ message HttpConnectionManager { // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of // `HTTP spec ` and is provided for convenience. bool merge_slashes = 33; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + RequestIDExtension request_id_extension = 36; } message Rds { @@ -640,3 +654,8 @@ message HttpFilter { google.protobuf.Any typed_config = 4; } } + +message RequestIDExtension { + // Request ID extension specific configuration. 
+ google.protobuf.Any typed_config = 1; +} diff --git a/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto b/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto index 7529bab63f31..ea2f60e71eed 100644 --- a/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto +++ b/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.network.kafka_broker.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "KafkaBrokerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.kafka_broker.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto b/api/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto index 73731971db62..791b767f3e6a 100644 --- a/api/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto +++ b/api/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto @@ -6,6 +6,7 @@ import "envoy/api/v2/core/base.proto"; import "envoy/type/token_bucket.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha"; @@ -13,6 +14,7 @@ option java_outer_classname = "LocalRateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.local_ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Local rate limit] // Local rate limit :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto index 59dbb526e757..b261897858e2 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -5,6 +5,7 @@ package envoy.config.filter.network.mongo_proxy.v2; import "envoy/config/filter/fault/v2/fault.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2"; @@ -12,6 +13,7 @@ option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.mongo_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto index ea63da925fcd..78c6b7e971df 100644 --- a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto +++ b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.network.mysql_proxy.v1alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "MysqlProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.mysql_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: MySQL proxy] // MySQL Proxy :ref:`configuration overview `. diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index b50da02c2068..aed56c9af629 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -8,6 +8,7 @@ import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2"; @@ -15,6 +16,7 @@ option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/rbac/v2/rbac.proto b/api/envoy/config/filter/network/rbac/v2/rbac.proto index 8700fa835b0e..ce86794c71cc 100644 --- a/api/envoy/config/filter/network/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v2/rbac.proto @@ -5,12 +5,14 @@ package envoy.config.filter.network.rbac.v2; import "envoy/config/rbac/v2/rbac.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.rbac.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index e5ea0dd0362f..caca630fd297 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -7,10 +7,10 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/sensitive.proto"; - import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2"; @@ -18,6 +18,7 @@ option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.redis_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // 
[#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration overview `. diff --git a/api/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto b/api/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto index 1b5ac94af14d..71c161fc48f6 100644 --- a/api/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto +++ b/api/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.network.sni_cluster.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.sni_cluster.v2"; option java_outer_classname = "SniClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.sni_cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: SNI Cluster Filter] // Set the upstream cluster name from the SNI field in the TLS connection. diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 053c1348e921..4ec68f320eed 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -11,6 +11,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; @@ -18,6 +19,7 @@ option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.tcp_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto index 669faae7bf64..8230a52e341e 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto @@ -8,6 +8,7 @@ import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; @@ -15,6 +16,7 @@ option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index 91ac5da3ef28..96e750ef310d 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; @@ -15,6 +16,7 @@ option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. diff --git a/api/envoy/config/filter/network/wasm/v2/wasm.proto b/api/envoy/config/filter/network/wasm/v2/wasm.proto deleted file mode 100644 index 34ce8bc12ec8..000000000000 --- a/api/envoy/config/filter/network/wasm/v2/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.wasm.v2; - -import "envoy/config/wasm/v2/wasm.proto"; - -import "udpa/annotations/migrate.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.wasm.v2"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.wasm.v3"; - -// [#protodoc-title: Wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. 
- config.wasm.v2.PluginConfig config = 1; -} diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto index b802bbb04b46..cae622cecc34 100644 --- a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto @@ -5,6 +5,7 @@ package envoy.config.filter.network.zookeeper_proxy.v1alpha1; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; @@ -12,6 +13,7 @@ option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.zookeeper_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ZooKeeper proxy] // ZooKeeper Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index 7004b882c5a7..389ddf35990e 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ -7,6 +7,7 @@ import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1"; @@ -14,6 +15,7 @@ option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/BUILD b/api/envoy/config/filter/thrift/router/v2alpha1/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/filter/thrift/router/v2alpha1/BUILD +++ b/api/envoy/config/filter/thrift/router/v2alpha1/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto index 8661675ce364..5463ab6513be 100644 --- a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto +++ b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.filter.thrift.router.v2alpha1; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Thrift router :ref:`configuration overview `. diff --git a/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD b/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD new file mode 100644 index 000000000000..c6f01577c828 --- /dev/null +++ b/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/api/v2/core:pkg", + "//envoy/data/dns/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto b/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto new file mode 100644 index 000000000000..de2608d44306 --- /dev/null +++ b/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.config.filter.udp.dns_filter.v2alpha; + +import "envoy/api/v2/core/base.proto"; +import "envoy/data/dns/v2alpha/dns_table.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.filter.udp.dns_filter.v2alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filter.udp.dns_filter.v3alpha"; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + // This message contains the configuration for the Dns Filter operating + // in a server context. This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v2alpha.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. 
This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + api.v2.core.DataSource external_dns_table = 2; + } + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration + ServerContextConfig server_config = 2; +} diff --git a/api/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD b/api/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD +++ b/api/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto index 720277aa4ccc..5079c1f0df48 100644 --- a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto @@ -4,11 +4,13 @@ package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. 
diff --git a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto index 9f17b40554be..b63d35af4018 100644 --- a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.grpc_credential.v2alpha; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_outer_classname = "AwsIamProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin diff --git a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto index c5c6a4d980b9..41e67f0bf24b 100644 --- a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto @@ -5,10 +5,12 @@ package envoy.config.grpc_credential.v2alpha; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_outer_classname = "FileBasedMetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin diff --git a/api/envoy/config/grpc_credential/v3/aws_iam.proto b/api/envoy/config/grpc_credential/v3/aws_iam.proto index a49436774b2f..eeb5d93ec689 100644 --- a/api/envoy/config/grpc_credential/v3/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v3/aws_iam.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package 
envoy.config.grpc_credential.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; option java_outer_classname = "AwsIamProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin diff --git a/api/envoy/config/grpc_credential/v3/file_based_metadata.proto b/api/envoy/config/grpc_credential/v3/file_based_metadata.proto index bee16939d7e8..b364d2917099 100644 --- a/api/envoy/config/grpc_credential/v3/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v3/file_based_metadata.proto @@ -5,11 +5,13 @@ package envoy.config.grpc_credential.v3; import "envoy/config/core/v3/base.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; option java_outer_classname = "FileBasedMetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin diff --git a/api/envoy/config/health_checker/redis/v2/BUILD b/api/envoy/config/health_checker/redis/v2/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/health_checker/redis/v2/BUILD +++ b/api/envoy/config/health_checker/redis/v2/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/health_checker/redis/v2/redis.proto b/api/envoy/config/health_checker/redis/v2/redis.proto index 
3f7e15d80d02..0c569f5c75e8 100644 --- a/api/envoy/config/health_checker/redis/v2/redis.proto +++ b/api/envoy/config/health_checker/redis/v2/redis.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.health_checker.redis.v2; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2"; option java_outer_classname = "RedisProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Redis] // Redis health checker :ref:`configuration overview `. diff --git a/api/envoy/config/listener/v2/api_listener.proto b/api/envoy/config/listener/v2/api_listener.proto index 3f974cad9e68..6709d5fe0b52 100644 --- a/api/envoy/config/listener/v2/api_listener.proto +++ b/api/envoy/config/listener/v2/api_listener.proto @@ -5,11 +5,13 @@ package envoy.config.listener.v2; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v2"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: API listener] diff --git a/api/envoy/config/listener/v3/BUILD b/api/envoy/config/listener/v3/BUILD index cd366902228b..71c151c040bc 100644 --- a/api/envoy/config/listener/v3/BUILD +++ b/api/envoy/config/listener/v3/BUILD @@ -8,6 +8,7 @@ api_proto_package( deps = [ "//envoy/api/v2:pkg", "//envoy/api/v2/listener:pkg", + "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/listener/v2:pkg", "//envoy/type/v3:pkg", diff --git a/api/envoy/config/listener/v3/api_listener.proto b/api/envoy/config/listener/v3/api_listener.proto index c8c3ea115963..4d3879a22b1d 100644 --- a/api/envoy/config/listener/v3/api_listener.proto +++ 
b/api/envoy/config/listener/v3/api_listener.proto @@ -4,11 +4,13 @@ package envoy.config.listener.v3; import "google/protobuf/any.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: API listener] diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index 356402019440..473a5eb2b42b 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.listener.v3; +import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/socket_option.proto"; @@ -13,18 +14,19 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` -// [#next-free-field: 22] +// [#next-free-field: 23] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -232,4 +234,8 @@ message Listener { // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. 
+ repeated accesslog.v3.AccessLog access_log = 22; } diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto index 6ba993b65132..88e75e65b526 100644 --- a/api/envoy/config/listener/v3/listener_components.proto +++ b/api/envoy/config/listener/v3/listener_components.proto @@ -10,13 +10,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ListenerComponentsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener components] // Listener :ref:`configuration overview ` @@ -207,9 +208,32 @@ message FilterChain { string name = 7; } -// [#not-implemented-hide:] // Listener filter chain match configuration. This is a recursive structure which allows complex // nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// // [#next-free-field: 6] message ListenerFilterChainMatchPredicate { option (udpa.annotations.versioning).previous_message_type = @@ -266,17 +290,8 @@ message ListenerFilter { google.protobuf.Any typed_config = 3; } - // [#not-implemented-hide:] - // Decide when to disable this listener filter on incoming traffic. - // Example: - // 0. always enable filter - // don't set `filter_disabled` - // 1. 
disable when the destination port is 3306 - // rule.destination_port_range = Int32Range {start = 3306, end = 3307} - // 2. disable when the destination port is 3306 or 15000 - // rule.or_match = MatchSet.rules [ - // rule.destination_port_range = Int32Range {start = 3306, end = 3307}, - // rule.destination_port_range = Int32Range {start = 15000, end = 15001}, - // ] + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. ListenerFilterChainMatchPredicate filter_disabled = 4; } diff --git a/api/envoy/config/listener/v3/quic_config.proto b/api/envoy/config/listener/v3/quic_config.proto index 76345d2973cc..9949da2e0d70 100644 --- a/api/envoy/config/listener/v3/quic_config.proto +++ b/api/envoy/config/listener/v3/quic_config.proto @@ -5,11 +5,13 @@ package envoy.config.listener.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] diff --git a/api/envoy/config/listener/v3/udp_listener_config.proto b/api/envoy/config/listener/v3/udp_listener_config.proto index 701a8bbe713d..9edbe5954862 100644 --- a/api/envoy/config/listener/v3/udp_listener_config.proto +++ b/api/envoy/config/listener/v3/udp_listener_config.proto @@ -5,11 +5,13 @@ package envoy.config.listener.v3; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = 
true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: UDP Listener Config] // Listener :ref:`configuration overview ` diff --git a/api/envoy/config/metrics/v2/BUILD b/api/envoy/config/metrics/v2/BUILD index a0eac27f8a5d..94999290bca3 100644 --- a/api/envoy/config/metrics/v2/BUILD +++ b/api/envoy/config/metrics/v2/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/config/metrics/v2/metrics_service.proto b/api/envoy/config/metrics/v2/metrics_service.proto index d2f60a6a67a7..f1f8662f0750 100644 --- a/api/envoy/config/metrics/v2/metrics_service.proto +++ b/api/envoy/config/metrics/v2/metrics_service.proto @@ -4,11 +4,13 @@ package envoy.config.metrics.v2; import "envoy/api/v2/core/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v2"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics service] diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index d3f797543a42..c6113bf5a5d3 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -9,11 +9,13 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v2"; option java_outer_classname = "StatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Stats] // Statistics :ref:`architecture overview `. 
diff --git a/api/envoy/config/metrics/v3/metrics_service.proto b/api/envoy/config/metrics/v3/metrics_service.proto index 002aa7482e7d..ad9879055ba3 100644 --- a/api/envoy/config/metrics/v3/metrics_service.proto +++ b/api/envoy/config/metrics/v3/metrics_service.proto @@ -4,13 +4,14 @@ package envoy.config.metrics.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v3"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics service] diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto index 2f6f5bade42a..f2f12d73a625 100644 --- a/api/envoy/config/metrics/v3/stats.proto +++ b/api/envoy/config/metrics/v3/stats.proto @@ -9,13 +9,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v3"; option java_outer_classname = "StatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Stats] // Statistics :ref:`architecture overview `. 
diff --git a/api/envoy/config/overload/v2alpha/BUILD b/api/envoy/config/overload/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/overload/v2alpha/BUILD +++ b/api/envoy/config/overload/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto index ff71a1e5dcca..03886cdee6d6 100644 --- a/api/envoy/config/overload/v2alpha/overload.proto +++ b/api/envoy/config/overload/v2alpha/overload.proto @@ -6,11 +6,13 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.overload.v2alpha"; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Overload Manager] diff --git a/api/envoy/config/overload/v3/overload.proto b/api/envoy/config/overload/v3/overload.proto index 22b93c3e85ce..d564e0d0ae3d 100644 --- a/api/envoy/config/overload/v3/overload.proto +++ b/api/envoy/config/overload/v3/overload.proto @@ -6,13 +6,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.overload.v3"; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Overload Manager] diff --git a/api/envoy/config/ratelimit/v2/BUILD 
b/api/envoy/config/ratelimit/v2/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/api/envoy/config/ratelimit/v2/BUILD +++ b/api/envoy/config/ratelimit/v2/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto index e617ec22f5de..92801ea7b968 100644 --- a/api/envoy/config/ratelimit/v2/rls.proto +++ b/api/envoy/config/ratelimit/v2/rls.proto @@ -4,11 +4,13 @@ package envoy.config.ratelimit.v2; import "envoy/api/v2/core/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.ratelimit.v2"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit service] diff --git a/api/envoy/config/ratelimit/v3/rls.proto b/api/envoy/config/ratelimit/v3/rls.proto index efd056711a96..bb3c538bbabf 100644 --- a/api/envoy/config/ratelimit/v3/rls.proto +++ b/api/envoy/config/ratelimit/v3/rls.proto @@ -4,13 +4,14 @@ package envoy.config.ratelimit.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.ratelimit.v3"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit service] diff --git a/api/envoy/config/rbac/v2/BUILD b/api/envoy/config/rbac/v2/BUILD index 064f8f79e0b8..d5b5dda169a0 100644 --- a/api/envoy/config/rbac/v2/BUILD +++ b/api/envoy/config/rbac/v2/BUILD @@ -9,6 +9,7 
@@ api_proto_package( "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "//envoy/type/matcher:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/api/envoy/config/rbac/v2/rbac.proto b/api/envoy/config/rbac/v2/rbac.proto index fa5d27fdf673..943ac33e0859 100644 --- a/api/envoy/config/rbac/v2/rbac.proto +++ b/api/envoy/config/rbac/v2/rbac.proto @@ -10,11 +10,13 @@ import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -168,7 +170,7 @@ message Permission { } // Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 10] +// [#next-free-field: 12] message Principal { // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. @@ -202,7 +204,21 @@ message Principal { Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. - api.v2.core.CidrRange source_ip = 5; + // This address will honor proxy protocol, but will not honor XFF. + api.v2.core.CidrRange source_ip = 5 [deprecated = true]; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is inferred + // from for example the x-forwarder-for header, proxy protocol, etc. + api.v2.core.CidrRange direct_remote_ip = 10; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip `. 
+ // E.g, if the remote ip is inferred from for example the x-forwarder-for header, + // proxy protocol, etc. + api.v2.core.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto index 0d9b552d85cc..040f537d1f5c 100644 --- a/api/envoy/config/rbac/v3/rbac.proto +++ b/api/envoy/config/rbac/v3/rbac.proto @@ -10,13 +10,14 @@ import "envoy/type/matcher/v3/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -179,7 +180,7 @@ message Permission { } // Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 10] +// [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; @@ -221,7 +222,21 @@ message Principal { Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. - core.v3.CidrRange source_ip = 5; + // This address will honor proxy protocol, but will not honor XFF. + core.v3.CidrRange source_ip = 5 [deprecated = true]; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is inferred + // from for example the x-forwarder-for header, proxy protocol, etc. + core.v3.CidrRange direct_remote_ip = 10; + + // A CIDR block that describes the downstream remote/origin address. 
+ // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip `. + // E.g, if the remote ip is inferred from for example the x-forwarder-for header, + // proxy protocol, etc. + core.v3.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. diff --git a/api/envoy/config/rbac/v4alpha/BUILD b/api/envoy/config/rbac/v4alpha/BUILD new file mode 100644 index 000000000000..dbfa8be4f36f --- /dev/null +++ b/api/envoy/config/rbac/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/rbac/v3:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", + ], +) diff --git a/api/envoy/config/rbac/v4alpha/rbac.proto b/api/envoy/config/rbac/v4alpha/rbac.proto new file mode 100644 index 000000000000..097231282f45 --- /dev/null +++ b/api/envoy/config/rbac/v4alpha/rbac.proto @@ -0,0 +1,258 @@ +syntax = "proto3"; + +package envoy.config.rbac.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v3/metadata.proto"; +import "envoy/type/matcher/v3/path.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/api/expr/v1alpha1/syntax.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.rbac.v4alpha"; +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Role Based Access Control (RBAC)] + +// Role Based Access Control (RBAC) provides service-level and method-level access control for a +// service. RBAC policies are additive. The policies are examined in order. A request is allowed +// once a matching policy is found (suppose the `action` is ALLOW). +// +// Here is an example of RBAC configuration. It has two policies: +// +// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so +// does "cluster.local/ns/default/sa/superuser". +// +// * Any user can read ("GET") the service at paths with prefix "/products", so long as the +// destination port is either 80 or 443. +// +// .. code-block:: yaml +// +// action: ALLOW +// policies: +// "service-admin": +// permissions: +// - any: true +// principals: +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/admin" +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/superuser" +// "product-viewer": +// permissions: +// - and_rules: +// rules: +// - header: { name: ":method", exact_match: "GET" } +// - url_path: +// path: { prefix: "/products" } +// - or_rules: +// rules: +// - destination_port: 80 +// - destination_port: 443 +// principals: +// - any: true +// +message RBAC { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.RBAC"; + + // Should we do safe-list or block-list style access control? + enum Action { + // The policies grant access to principals. The rest is denied. This is safe-list style + // access control. This is the default type. + ALLOW = 0; + + // The policies deny access to principals. The rest is allowed. This is block-list style + // access control. + DENY = 1; + } + + // The action to take if a policy matches. 
The request is allowed if and only if: + // + // * `action` is "ALLOWED" and at least one policy matches + // * `action` is "DENY" and none of the policies match + Action action = 1; + + // Maps from policy name to policy. A match occurs when at least one policy matches the request. + map policies = 2; +} + +// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if +// and only if at least one of its permissions match the action taking place AND at least one of its +// principals match the downstream AND the condition is true if specified. +message Policy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; + + // Required. The set of permissions that define a role. Each permission is matched with OR + // semantics. To match all actions for this policy, a single Permission with the `any` field set + // to true should be used. + repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Required. The set of principals that are assigned/denied the role based on “actionâ€. Each + // principal is matched with OR semantics. To match all downstreams for this policy, a single + // Principal with the `any` field set to true should be used. + repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; + + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + google.api.expr.v1alpha1.Expr condition = 3; +} + +// Permission defines an action (or actions) that a principal can take. +// [#next-free-field: 11] +message Permission { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Permission"; + + // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, + // each are applied with the associated behavior. 
+ message Set { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.rbac.v3.Permission.Set"; + + repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + oneof rule { + option (validate.required) = true; + + // A set of rules that all must match in order to define the action. + Set and_rules = 1; + + // A set of rules where at least one must match in order to define the action. + Set or_rules = 2; + + // When any is set, it matches any action. + bool any = 3 [(validate.rules).bool = {const: true}]; + + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. + route.v4alpha.HeaderMatcher header = 4; + + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.v3.PathMatcher url_path = 10; + + // A CIDR block that describes the destination IP. + core.v4alpha.CidrRange destination_ip = 5; + + // A port number that describes the destination port connecting to. + uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; + + // Metadata that describes additional information about the action. + type.matcher.v3.MetadataMatcher metadata = 7; + + // Negates matching the provided permission. For instance, if the value of `not_rule` would + // match, this permission would not match. Conversely, if the value of `not_rule` would not + // match, this permission would match. + Permission not_rule = 8; + + // The request server from the client's connection request. This is + // typically TLS SNI. + // + // .. attention:: + // + // The behavior of this field may be affected by how Envoy is configured + // as explained below. 
+ // + // * If the :ref:`TLS Inspector ` + // filter is not added, and if a `FilterChainMatch` is not defined for + // the :ref:`server name `, + // a TLS connection's requested SNI server name will be treated as if it + // wasn't present. + // + // * A :ref:`listener filter ` may + // overwrite a connection's requested server name within Envoy. + // + // Please refer to :ref:`this FAQ entry ` to learn to + // setup SNI. + type.matcher.v3.StringMatcher requested_server_name = 9; + } +} + +// Principal defines an identity or a group of identities for a downstream subject. +// [#next-free-field: 12] +message Principal { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; + + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, + // each are applied with the associated behavior. + message Set { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.rbac.v3.Principal.Set"; + + repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Authentication attributes for a downstream. + message Authenticated { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.rbac.v3.Principal.Authenticated"; + + reserved 1; + + // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the + // certificate, otherwise the subject field is used. If unset, it applies to any user that is + // authenticated. + type.matcher.v3.StringMatcher principal_name = 2; + } + + reserved 5; + + reserved "source_ip"; + + oneof identifier { + option (validate.required) = true; + + // A set of identifiers that all must match in order to define the downstream. + Set and_ids = 1; + + // A set of identifiers at least one must match in order to define the downstream. + Set or_ids = 2; + + // When any is set, it matches any downstream. 
+ bool any = 3 [(validate.rules).bool = {const: true}]; + + // Authenticated attributes that identify the downstream. + Authenticated authenticated = 4; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is inferred + // from for example the x-forwarder-for header, proxy protocol, etc. + core.v4alpha.CidrRange direct_remote_ip = 10; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip `. + // E.g, if the remote ip is inferred from for example the x-forwarder-for header, + // proxy protocol, etc. + core.v4alpha.CidrRange remote_ip = 11; + + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. + route.v4alpha.HeaderMatcher header = 6; + + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.v3.PathMatcher url_path = 9; + + // Metadata that describes additional information about the principal. + type.matcher.v3.MetadataMatcher metadata = 7; + + // Negates matching the provided principal. For instance, if the value of `not_id` would match, + // this principal would not match. Conversely, if the value of `not_id` would not match, this + // principal would match. 
+ Principal not_id = 8; + } +} diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto index 0ec5c9424edd..529622a071e7 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.resource_monitor.fixed_heap.v2alpha; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha"; option java_outer_classname = "FixedHeapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Fixed heap] // [#extension: envoy.resource_monitors.fixed_heap] diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD b/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git 
a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto index 1073e16b1b6e..a9f056d2d29a 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.resource_monitor.injected_resource.v2alpha; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha"; option java_outer_classname = "InjectedResourceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Injected resource] // [#extension: envoy.resource_monitors.injected_resource] diff --git a/api/envoy/config/retry/omit_canary_hosts/v2/BUILD b/api/envoy/config/retry/omit_canary_hosts/v2/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/retry/omit_canary_hosts/v2/BUILD +++ b/api/envoy/config/retry/omit_canary_hosts/v2/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto b/api/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto index b4cf8c0cbcf2..107bf6fc2dbe 100644 --- a/api/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto +++ b/api/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.retry.omit_canary_hosts.v2; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2"; option java_outer_classname = 
"OmitCanaryHostsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Omit Canary Hosts Predicate] // [#extension: envoy.retry_host_predicates.omit_canary_hosts] diff --git a/api/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto b/api/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto index 35bd5d00910c..d229cffef8ca 100644 --- a/api/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto +++ b/api/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto @@ -5,12 +5,14 @@ package envoy.config.retry.omit_host_metadata.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.retry.omit_host_metadata.v2"; option java_outer_classname = "OmitHostMetadataConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.retry.host.omit_host_metadata.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Omit host metadata retry predicate] diff --git a/api/envoy/config/retry/previous_hosts/v2/BUILD b/api/envoy/config/retry/previous_hosts/v2/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/retry/previous_hosts/v2/BUILD +++ b/api/envoy/config/retry/previous_hosts/v2/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/retry/previous_hosts/v2/previous_hosts.proto b/api/envoy/config/retry/previous_hosts/v2/previous_hosts.proto index 75532397ff7c..e87e8cd70eaf 100644 --- a/api/envoy/config/retry/previous_hosts/v2/previous_hosts.proto +++ 
b/api/envoy/config/retry/previous_hosts/v2/previous_hosts.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.retry.previous_hosts.v2; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.retry.previous_hosts.v2"; option java_outer_classname = "PreviousHostsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Previous Hosts Predicate] // [#extension: envoy.retry_host_predicates.previous_hosts] diff --git a/api/envoy/config/retry/previous_priorities/BUILD b/api/envoy/config/retry/previous_priorities/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/config/retry/previous_priorities/BUILD +++ b/api/envoy/config/retry/previous_priorities/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto b/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto index 4e1703cd2529..e96741178576 100644 --- a/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto +++ b/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.retry.previous_priorities; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities"; option java_outer_classname = "PreviousPrioritiesConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Previous priorities retry selector] diff --git a/api/envoy/config/route/v3/route.proto b/api/envoy/config/route/v3/route.proto index 5a1c4204c9b0..a528d99bd448 100644 --- 
a/api/envoy/config/route/v3/route.proto +++ b/api/envoy/config/route/v3/route.proto @@ -8,13 +8,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP route configuration] // * Routing :ref:`architecture overview ` diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index f1a91fe580a4..f63f0961249f 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -14,14 +14,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v3"; option java_outer_classname = "RouteComponentsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP route components] // * Routing :ref:`architecture overview ` @@ -1265,7 +1266,7 @@ message Tracing { // statistics are perfect in the sense that they are emitted on the downstream // side such that they include network level failures. // -// Documentation for :ref:`virtual cluster statistics `. +// Documentation for :ref:`virtual cluster statistics `. // // .. 
note:: // diff --git a/api/envoy/config/route/v3/scoped_route.proto b/api/envoy/config/route/v3/scoped_route.proto index fee88e862f47..f2b28ed974c0 100644 --- a/api/envoy/config/route/v3/scoped_route.proto +++ b/api/envoy/config/route/v3/scoped_route.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.config.route.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v3"; option java_outer_classname = "ScopedRouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` diff --git a/api/envoy/config/route/v4alpha/BUILD b/api/envoy/config/route/v4alpha/BUILD new file mode 100644 index 000000000000..507bedd76bdf --- /dev/null +++ b/api/envoy/config/route/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/tracing/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/route/v4alpha/route.proto b/api/envoy/config/route/v4alpha/route.proto new file mode 100644 index 000000000000..1b805d35344f --- /dev/null +++ b/api/envoy/config/route/v4alpha/route.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.config.route.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP route configuration] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// [#next-free-field: 11] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteConfiguration"; + + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.Rds`. + string name = 1; + + // An array of virtual hosts that make up the route table. + repeated VirtualHost virtual_hosts = 2; + + // An array of virtual hosts will be dynamically loaded via the VHDS API. 
+ // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used + // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // taking precedence. + Vhds vhds = 9; + + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + repeated string internal_only_headers = 3 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or + // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + repeated string response_headers_to_remove = 5 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. 
Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or + // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + repeated string request_headers_to_remove = 8 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // By default, headers that should be added/removed are evaluated from most to least specific: + // + // * route level + // * virtual host level + // * connection manager level + // + // To allow setting overrides at the route or virtual host level, this order can be reversed + // by setting this option to true. Defaults to false. + // + // [#next-major-version: In the v3 API, this will default to true.] + bool most_specific_header_mutations_wins = 10; + + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). 
+ google.protobuf.BoolValue validate_clusters = 7; +} + +message Vhds { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Vhds"; + + // Configuration source specifier for VHDS. + core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto new file mode 100644 index 000000000000..33f8d64543df --- /dev/null +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -0,0 +1,1566 @@ +syntax = "proto3"; + +package envoy.config.route.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v3/regex.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/tracing/v3/custom_tag.proto"; +import "envoy/type/v3/percent.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; +option java_outer_classname = "RouteComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP route components] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. 
Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. +// [#next-free-field: 21] +message VirtualHost { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.VirtualHost"; + + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9, 12; + + reserved "per_filter_config"; + + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. + // + // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. 
+ repeated string domains = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} + }]; + + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + repeated Route routes = 3; + + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; + + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + repeated VirtualCluster virtual_clusters = 5; + + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + repeated RateLimit rate_limits = 6; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. + repeated string request_headers_to_remove = 13; + + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + repeated string response_headers_to_remove = 11; + + // Indicates that the virtual host has a CORS policy. + CorsPolicy cors = 8; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 15; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + // + // [#next-major-version: rename to include_attempt_count_in_request.] + bool include_request_attempt_count = 14; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the downstream response. Setting this option will cause the router to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the downstream + // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. 
+ bool include_attempt_count_in_response = 19; + + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + RetryPolicy retry_policy = 16; + + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that setting a route level entry + // will take precedence over this config and it'll be treated independently (e.g.: values are not + // inherited). :ref:`Retry policy ` should not be + // set if this field is used. + google.protobuf.Any retry_policy_typed_config = 20; + + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + HedgePolicy hedge_policy = 17; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum + // value of this and the listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; +} + +// A filter-defined action type. +message FilterAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterAction"; + + google.protobuf.Any action = 1; +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// [#next-free-field: 18] +message Route { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Route"; + + reserved 6, 8; + + reserved "per_filter_config"; + + // Name for the route. + string name = 14; + + // Route matching parameters. 
+ RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + oneof action { + option (validate.required) = true; + + // Route request to some upstream cluster. + RouteAction route = 2; + + // Return a redirect. + RedirectAction redirect = 3; + + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + + // [#not-implemented-hide:] + // If true, a filter will define the action (e.g., it could dynamically generate the + // RouteAction). + FilterAction filter_action = 17; + } + + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as *envoy.filters.http.router*. + core.v4alpha.Metadata metadata = 4; + + // Decorator for the matched route. + Decorator decorator = 5; + + // The typed_per_filter_config field can be used to provide route-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map typed_per_filter_config = 13; + + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + repeated string request_headers_to_remove = 12; + + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 11; + + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; +} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. 
+message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.WeightedCluster"; + + // [#next-free-field: 11] + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.WeightedCluster.ClusterWeight"; + + reserved 7, 8; + + reserved "per_filter_config"; + + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An integer between 0 and :ref:`total_weight + // `. When a request matches the route, + // the choice of an upstream cluster is determined by its weight. The sum of weights across all + // entries in the clusters array must add up to the total_weight, which defaults to 100. + google.protobuf.UInt32Value weight = 2; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. Note that this will be merged with what's provided in + // :ref:`RouteAction.metadata_match `, with + // values here taking precedence. The filter name should be specified as *envoy.lb*. + core.v4alpha.Metadata metadata_match = 3; + + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + repeated string request_headers_to_remove = 9; + + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + repeated string response_headers_to_remove = 6; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 10; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, which must be greater than 0. Defaults to 100. 
+ google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the *runtime_key_prefix* is + // specified, the router will look for weights associated with each upstream + // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where + // *cluster[i]* denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. + string runtime_key_prefix = 2; +} + +// [#next-free-field: 12] +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; + + message GrpcRouteMatchOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions"; + } + + message TlsContextMatchOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.TlsContextMatchOptions"; + + // If specified, the route will match against whether or not a certificate is presented. + // If not specified, certificate presentation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue presented = 1; + + // If specified, the route will match against whether or not a certificate is validated. + // If not specified, certificate validation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue validated = 2; + } + + reserved 5, 3; + + reserved "regex"; + + oneof path_specifier { + option (validate.required) = true; + + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. 
+ string prefix = 1; + + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + } + + // Indicates that prefix/path matching should be case insensitive. The default + // is true. + google.protobuf.BoolValue case_sensitive = 4; + + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. 
Refer to the :ref:`traffic shifting + // ` docs for additional documentation. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9; + + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + repeated HeaderMatcher headers = 6; + + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; + + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + GrpcRouteMatchOptions grpc = 8; + + // If specified, the client tls context will be matched against the defined + // match options. 
+ // + // [#next-major-version: unify with RBAC] + TlsContextMatchOptions tls_context = 11; +} + +// [#next-free-field: 12] +message CorsPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.CorsPolicy"; + + reserved 1, 8, 7; + + reserved "allow_origin", "allow_origin_regex", "enabled"; + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. + repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + + // Specifies the content for the *access-control-allow-methods* header. + string allow_methods = 2; + + // Specifies the content for the *access-control-allow-headers* header. + string allow_headers = 3; + + // Specifies the content for the *access-control-expose-headers* header. + string expose_headers = 4; + + // Specifies the content for the *access-control-max-age* header. + string max_age = 5; + + // Specifies whether the resource allows credentials. + google.protobuf.BoolValue allow_credentials = 6; + + oneof enabled_specifier { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + } + + // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + // enforced. + // + // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those + // fields have to explicitly disable the filter in order for this setting to take effect. 
+ // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* to determine if it's valid but will not enforce any policies. + core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; +} + +// [#next-free-field: 34] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; + + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + // + // .. note:: + // + // Shadowing will not be triggered if the primary cluster does not exist. + message RequestMirrorPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.RequestMirrorPolicy"; + + reserved 2; + + reserved "runtime_key"; + + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. 
+ // + // If specified, this field takes precedence over the `runtime_key` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3; + + // Determines if the trace span should be sampled. Defaults to true. + google.protobuf.BoolValue trace_sampled = 4; + } + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + // [#next-free-field: 7] + message HashPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy"; + + message Header { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.Header"; + + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} + ]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. 
The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.Cookie"; + + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties"; + + // Hash on source IP address. + bool source_ip = 1; + } + + message QueryParameter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter"; + + // The name of the URL query parameter that will be used to obtain the hash + // key. If the parameter is not present, no hash will be produced. Query + // parameter names are case-sensitive. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + message FilterState { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.FilterState"; + + // The name of the Object in the per-request filterState, which is an + // Envoy::Http::Hashable object. 
If there is no data associated with the key, + // or the stored object is not Envoy::Http::Hashable, no hash will be produced. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + + // Query parameter hash policy. + QueryParameter query_parameter = 5; + + // Filter state hash policy. + FilterState filter_state = 6; + } + + // The flag that short-circuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:`upgrade_configs + // ` + // but does not affect any custom filter chain specified there. + message UpgradeConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.UpgradeConfig"; + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. 
+ string upgrade_type = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Determines if upgrades are available on this route. Defaults to true. + google.protobuf.BoolValue enabled = 2; + } + + reserved 12, 18, 19, 16, 22, 21, 10; + + reserved "request_mirror_policy"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + string cluster_header = 2 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } + + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. 
The filter name should be specified as *envoy.lb*. + core.v4alpha.Metadata metadata_match = 4; + + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of *prefix_rewrite* or + // :ref:`regex_rewrite ` + // may be specified. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to */prefix* will be stripped to */*, while + // requests to */prefix/etc* will be stripped to */etc*. + string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during forwarding, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. The router filter will place the original path as it was + // before the rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`prefix_rewrite ` + // or *regex_rewrite* may be specified. 
+ // + // Examples using Google's `RE2 `_ engine: + // + // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + // into ``/v1/api/instance/foo``. + // + // * The pattern ``one`` paired with a substitution string of ``two`` would + // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + // + // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + // ``\1two\2`` would replace only the first occurrence of ``one``, + // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + // + // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + // ``/aaa/yyy/bbb``. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 6 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; + + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. 
+ string host_rewrite_header = 29 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } + + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration timeout = 8; + + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + google.protobuf.Duration idle_timeout = 24; + + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
+ RetryPolicy retry_policy = 9; + + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that if this is set, it'll take + // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, + // most internal one becomes the enforced policy). :ref:`Retry policy ` + // should not be set if this field is used. + google.protobuf.Any retry_policy_typed_config = 33; + + // Indicates that the route has request mirroring policies. + repeated RequestMirrorPolicy request_mirror_policies = 30; + + // Optionally specifies the :ref:`routing priority `. + core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + + // Specifies a set of rate limit configurations that could be applied to the + // route. + repeated RateLimit rate_limits = 13; + + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + google.protobuf.BoolValue include_vh_rate_limits = 14; + + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). 
If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. + repeated HashPolicy hash_policy = 15; + + // Indicates that the route has a CORS policy. + CorsPolicy cors = 17; + + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. + // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + // + // .. note:: + // + // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + // precedence over `grpc-timeout header `_, when + // both are present. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration max_grpc_timeout = 23; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. 
This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28; + + repeated UpgradeConfig upgrade_configs = 25; + + InternalRedirectAction internal_redirect_action = 26; + + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 31; + + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; +} + +// HTTP retry :ref:`architecture overview `. 
+// [#next-free-field: 11] +message RetryPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy"; + + message RetryPriority { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RetryPolicy.RetryPriority"; + + reserved 2; + + reserved "config"; + + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RetryPolicy.RetryHostPredicate"; + + reserved 2; + + reserved "config"; + + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RetryPolicy.RetryBackOff"; + + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. 
+ string retry_on = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + google.protobuf.UInt32Value num_retries = 2; + + // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The + // same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + google.protobuf.Duration per_try_timeout = 3; + + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority retry_priority = 4; + + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + repeated RetryHostPredicate retry_host_predicate = 5; + + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + int64 host_selection_retry_max_attempts = 6; + + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + repeated uint32 retriable_status_codes = 7; + + // Specifies parameters that control retry back off. 
This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff retry_back_off = 8; + + // HTTP response headers that trigger a retry if present in the response. A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + repeated HeaderMatcher retriable_headers = 9; + + // HTTP headers which must be present in the request for retries to be attempted. + repeated HeaderMatcher retriable_request_headers = 10; +} + +// HTTP request hedging :ref:`architecture overview `. +message HedgePolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.HedgePolicy"; + + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + // [#not-implemented-hide:] + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + // [#not-implemented-hide:] + type.v3.FractionalPercent additional_request_chance = 2; + + // Indicates that a hedged request should be sent when the per-try timeout + // is hit. This will only occur if the retry policy also indicates that a + // timed out request should be retried. + // Once a timed out request is retried due to per try timeout, the router + // filter will ensure that it is not retried again even if the returned + // response headers would otherwise be retried according the specified + // :ref:`RetryPolicy `. + // Defaults to false. 
+ bool hedge_on_per_try_timeout = 3; +} + +// [#next-free-field: 9] +message RedirectAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RedirectAction"; + + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is `http` and the port is explicitly + // set to `:80`, the port will be removed after the redirection + // 2. If the source URI scheme is `https` and the port is explicitly + // set to `:443`, the port will be removed after the redirection + oneof scheme_rewrite_specifier { + // The scheme portion of the URL will be swapped with "https". + bool https_redirect = 4; + + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + + // The host portion of the URL will be swapped with this value. + string host_redirect = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + string path_redirect = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + // + // .. attention:: + // + // Pay attention to the use of trailing slashes as mentioned in + // :ref:`RouteAction's prefix_rewrite `. 
+ string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. + bool strip_query = 6; +} + +message DirectResponseAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.DirectResponseAction"; + + // Specifies the HTTP response status to be returned. + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; + + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. note:: + // + // Headers can be specified using *response_headers_to_add* in the enclosing + // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` or + // :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`. + core.v4alpha.DataSource body = 2; +} + +message Decorator { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Decorator"; + + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Whether the decorated details should be propagated to the other party. The default is true. 
+ google.protobuf.BoolValue propagate = 2; +} + +message Tracing { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Tracing"; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + type.v3.FractionalPercent client_sampling = 1; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.FractionalPercent random_sampling = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.FractionalPercent overall_sampling = 3; + + // A list of custom tags with unique tag name to create tags for the active span. + // It will take effect after merging with the :ref:`corresponding configuration + // ` + // configured in the HTTP connection manager. If two tags with the same name are configured + // each in the HTTP connection manager and the route level, the one configured here takes + // priority. 
+ repeated type.tracing.v3.CustomTag custom_tags = 4; +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect†statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. +message VirtualCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.VirtualCluster"; + + reserved 1, 3; + + reserved "pattern", "method"; + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers `:path` and `:method` can be used to match the request path and + // method, respectively. + repeated HeaderMatcher headers = 4; + + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + string name = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Global rate limiting :ref:`architecture overview `. 
+message RateLimit { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; + + // [#next-free-field: 7] + message Action { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action"; + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.SourceCluster"; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.DestinationCluster"; + } + + // The following descriptor entry is appended when a header contains a key that matches the + // *header_name*: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.RequestHeaders"; + + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. 
+ string header_name = 1 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} + ]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.RemoteAddress"; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.GenericKey"; + + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch"; + + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. 
A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + } + } + + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* +// header. Thus, if attempting to match on *Host*, match on *:authority* instead. +// +// .. 
attention:: +// +// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "exact_match": "POST" +// } +// +// .. attention:: +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +// [#next-free-field: 12] +message HeaderMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.HeaderMatcher"; + + reserved 2, 3, 5; + + reserved "regex_match"; + + // Specifies the name of the header in the request. + string name = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Specifies how the header match will be performed to route the request. + oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + type.matcher.v3.RegexMatcher safe_regex_match = 11; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. 
+ // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, + // "-1somestring" + type.v3.Int64Range range_match = 6; + + // If specified, header match will be performed based on whether the header is in the + // request. + bool present_match = 7; + + // If specified, header match will be performed based on the prefix of the header value. + // Note: empty prefix is not allowed, please use present_match instead. + // + // Examples: + // + // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, header match will be performed based on the suffix of the header value. + // Note: empty suffix is not allowed, please use present_match instead. + // + // Examples: + // + // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. + string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + } + + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. + bool invert_match = 8; +} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. +// [#next-free-field: 7] +message QueryParameterMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.QueryParameterMatcher"; + + reserved 3, 4; + + reserved "value", "regex"; + + // Specifies the name of a key that must be present in the requested + // *path*'s query string. + string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + + oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. 
+ type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + + // Specifies whether a query parameter should be present. + bool present_match = 6; + } +} diff --git a/api/envoy/config/route/v4alpha/scoped_route.proto b/api/envoy/config/route/v4alpha/scoped_route.proto new file mode 100644 index 000000000000..ce3d285b0592 --- /dev/null +++ b/api/envoy/config/route/v4alpha/scoped_route.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.config.route.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; +option java_outer_classname = "ScopedRouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP scoped routing configuration] +// * Routing :ref:`architecture overview ` + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` (identified by its resource name). +// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... +// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: , +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. 
code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the `route-config1` +// RouteConfiguration being assigned to the HTTP request/stream. +// +message ScopedRouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ScopedRouteConfiguration"; + + // Specifies a key which is matched against the output of the + // :ref:`scope_key_builder` + // specified in the HttpConnectionManager. The matching is done per HTTP + // request and is dependent on the order of the fragments contained in the + // Key. + message Key { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ScopedRouteConfiguration.Key"; + + message Fragment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment"; + + oneof type { + option (validate.required) = true; + + // A string to match against. + string string_key = 1; + } + } + + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. + repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated + // with this scope. 
+ string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The key to match against. + Key key = 3 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index d01ce006faea..0fea8f88a638 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -8,13 +8,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common tap configuration] diff --git a/api/envoy/config/trace/v2/BUILD b/api/envoy/config/trace/v2/BUILD index 15069690c2e8..ca496808bdae 100644 --- a/api/envoy/config/trace/v2/BUILD +++ b/api/envoy/config/trace/v2/BUILD @@ -8,6 +8,7 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 420e4aa28ff0..393465d2bb24 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -11,11 +11,13 @@ import "google/protobuf/wrappers.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "TraceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tracing] 
// Tracing :ref:`architecture overview `. @@ -61,12 +63,31 @@ message Tracing { // Configuration for the LightStep tracer. // [#extension: envoy.tracers.lightstep] message LightstepConfig { + // Available propagation modes + enum PropagationMode { + // Propagate trace context in the single header x-ot-span-context. + ENVOY = 0; + + // Propagate trace context using LightStep's native format. + LIGHTSTEP = 1; + + // Propagate trace context using the b3 format. + B3 = 2; + + // Propagation trace context using the w3 trace-context standard. + TRACE_CONTEXT = 3; + } + // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Propagation modes to use by LightStep's tracer. + repeated PropagationMode propagation_modes = 3 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } // Configuration for the Zipkin tracer. 
diff --git a/api/envoy/config/trace/v2alpha/BUILD b/api/envoy/config/trace/v2alpha/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/api/envoy/config/trace/v2alpha/BUILD +++ b/api/envoy/config/trace/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/config/trace/v2alpha/xray.proto b/api/envoy/config/trace/v2alpha/xray.proto index d415846dfef1..27db3ba40b72 100644 --- a/api/envoy/config/trace/v2alpha/xray.proto +++ b/api/envoy/config/trace/v2alpha/xray.proto @@ -5,11 +5,13 @@ package envoy.config.trace.v2alpha; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2alpha"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer @@ -19,8 +21,8 @@ message XRayConfig { // If this value is not set, the default value of 127.0.0.1:2000 will be used. api.v2.core.SocketAddress daemon_endpoint = 1; - // The name of the X-Ray segment. By default this will be set to the cluster name. - string segment_name = 2; + // The name of the X-Ray segment. + string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. 
// For an example of the sampling rules see: diff --git a/api/envoy/config/trace/v3/trace.proto b/api/envoy/config/trace/v3/trace.proto index 12e944269a1d..ec055e3c61b0 100644 --- a/api/envoy/config/trace/v3/trace.proto +++ b/api/envoy/config/trace/v3/trace.proto @@ -9,14 +9,16 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; -import "udpa/annotations/versioning.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "TraceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tracing] // Tracing :ref:`architecture overview `. @@ -72,12 +74,31 @@ message LightstepConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.LightstepConfig"; + // Available propagation modes + enum PropagationMode { + // Propagate trace context in the single header x-ot-span-context. + ENVOY = 0; + + // Propagate trace context using LightStep's native format. + LIGHTSTEP = 1; + + // Propagate trace context using the b3 format. + B3 = 2; + + // Propagation trace context using the w3 trace-context standard. + TRACE_CONTEXT = 3; + } + // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Propagation modes to use by LightStep's tracer. + repeated PropagationMode propagation_modes = 3 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } // Configuration for the Zipkin tracer. 
diff --git a/api/envoy/config/trace/v3/xray.proto b/api/envoy/config/trace/v3/xray.proto index 08baf444c38a..c4259177d657 100644 --- a/api/envoy/config/trace/v3/xray.proto +++ b/api/envoy/config/trace/v3/xray.proto @@ -5,13 +5,14 @@ package envoy.config.trace.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer @@ -24,8 +25,8 @@ message XRayConfig { // If this value is not set, the default value of 127.0.0.1:2000 will be used. core.v3.SocketAddress daemon_endpoint = 1; - // The name of the X-Ray segment. By default this will be set to the cluster name. - string segment_name = 2; + // The name of the X-Ray segment. + string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. // For an example of the sampling rules see: diff --git a/api/envoy/config/trace/v4alpha/BUILD b/api/envoy/config/trace/v4alpha/BUILD new file mode 100644 index 000000000000..53ae98aac140 --- /dev/null +++ b/api/envoy/config/trace/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/trace/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", + ], +) diff --git a/api/envoy/config/trace/v4alpha/trace.proto b/api/envoy/config/trace/v4alpha/trace.proto new file mode 100644 index 000000000000..c7e24549acff --- /dev/null +++ b/api/envoy/config/trace/v4alpha/trace.proto @@ -0,0 +1,271 @@ +syntax = "proto3"; + +package envoy.config.trace.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "opencensus/proto/trace/v1/trace_config.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; +option java_outer_classname = "TraceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + +// The tracing configuration specifies global +// settings for the HTTP tracer used by Envoy. The configuration is defined by +// the :ref:`Bootstrap ` :ref:`tracing +// ` field. Envoy may support other tracers +// in the future, but right now the HTTP tracer is the only one supported. 
+message Tracing { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.Tracing"; + + message Http { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.Tracing.Http"; + + reserved 2; + + reserved "config"; + + // The name of the HTTP trace driver to instantiate. The name must match a + // supported HTTP trace driver. Built-in trace drivers: + // + // - *envoy.tracers.lightstep* + // - *envoy.tracers.zipkin* + // - *envoy.tracers.dynamic_ot* + // - *envoy.tracers.datadog* + // - *envoy.tracers.opencensus* + // - *envoy.tracers.xray* + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Trace driver specific configuration which depends on the driver being instantiated. + // See the trace drivers for examples: + // + // - :ref:`LightstepConfig ` + // - :ref:`ZipkinConfig ` + // - :ref:`DynamicOtConfig ` + // - :ref:`DatadogConfig ` + // - :ref:`OpenCensusConfig ` + // - :ref:`AWS X-Ray ` + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Provides configuration for the HTTP tracer. + Http http = 1; +} + +// Configuration for the LightStep tracer. +// [#extension: envoy.tracers.lightstep] +message LightstepConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.LightstepConfig"; + + // Available propagation modes + enum PropagationMode { + // Propagate trace context in the single header x-ot-span-context. + ENVOY = 0; + + // Propagate trace context using LightStep's native format. + LIGHTSTEP = 1; + + // Propagate trace context using the b3 format. + B3 = 2; + + // Propagation trace context using the w3 trace-context standard. + TRACE_CONTEXT = 3; + } + + // The cluster manager cluster that hosts the LightStep collectors. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // File containing the access token to the `LightStep + // `_ API. 
+ string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Propagation modes to use by LightStep's tracer. + repeated PropagationMode propagation_modes = 3 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; +} + +// Configuration for the Zipkin tracer. +// [#extension: envoy.tracers.zipkin] +// [#next-free-field: 6] +message ZipkinConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ZipkinConfig"; + + // Available Zipkin collector endpoint versions. + enum CollectorEndpointVersion { + // Zipkin API v1, JSON over HTTP. + // [#comment: The default implementation of Zipkin client before this field is added was only v1 + // and the way user configure this was by not explicitly specifying the version. Consequently, + // before this is added, the corresponding Zipkin collector expected to receive v1 payload. + // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when + // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, + // since in Zipkin realm this v1 version is considered to be not preferable anymore.] + DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 + [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; + + // Zipkin API v2, JSON over HTTP. + HTTP_JSON = 1; + + // Zipkin API v2, protobuf over HTTP. + HTTP_PROTO = 2; + + // [#not-implemented-hide:] + GRPC = 3; + } + + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. 
+ string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + bool trace_id_128bit = 3; + + // Determines whether client and server spans will share the same span context. + // The default value is true. + google.protobuf.BoolValue shared_span_context = 4; + + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be + // used. + CollectorEndpointVersion collector_endpoint_version = 5; +} + +// DynamicOtConfig is used to dynamically load a tracer from a shared library +// that implements the `OpenTracing dynamic loading API +// `_. +// [#extension: envoy.tracers.dynamic_ot] +message DynamicOtConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.DynamicOtConfig"; + + // Dynamic library implementing the `OpenTracing API + // `_. + string library = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The configuration to use when creating a tracer from the given dynamic + // library. + google.protobuf.Struct config = 2; +} + +// Configuration for the Datadog tracer. +// [#extension: envoy.tracers.datadog] +message DatadogConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.DatadogConfig"; + + // The cluster to use for submitting traces to the Datadog agent. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The name used for the service when traces are generated by envoy. + string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Configuration for the OpenCensus tracer. 
+// [#next-free-field: 15] +// [#extension: envoy.tracers.opencensus] +message OpenCensusConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.OpenCensusConfig"; + + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + + // Configures tracing, e.g. the sampler, max number of annotations, etc. + opencensus.proto.trace.v1.TraceConfig trace_config = 1; + + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + bool stdout_exporter_enabled = 2; + + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + bool stackdriver_exporter_enabled = 3; + + // The Cloud project_id to use for Stackdriver tracing. + string stackdriver_project_id = 4; + + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + string stackdriver_address = 10; + + // (optional) The gRPC server that hosts Stackdriver tracing service. Only + // Google gRPC is supported. If :ref:`target_uri ` + // is not provided, the default production Stackdriver address will be used. + core.v4alpha.GrpcService stackdriver_grpc_service = 13; + + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. + bool zipkin_exporter_enabled = 5; + + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" + string zipkin_url = 6; + + // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or + // ocagent_grpc_service must also be set. 
+ bool ocagent_exporter_enabled = 11; + + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + // [#comment:TODO: deprecate this field] + string ocagent_address = 12; + + // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. + // This is only used if the ocagent_address is left empty. + core.v4alpha.GrpcService ocagent_grpc_service = 14; + + // List of incoming trace context headers we will accept. First one found + // wins. + repeated TraceContext incoming_trace_context = 8; + + // List of outgoing trace context headers we will produce. + repeated TraceContext outgoing_trace_context = 9; +} + +// Configuration structure. +message TraceServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.TraceServiceConfig"; + + // The upstream gRPC cluster that hosts the metrics service. + core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/config/trace/v4alpha/xray.proto b/api/envoy/config/trace/v4alpha/xray.proto new file mode 100644 index 000000000000..39bcebd1bad7 --- /dev/null +++ b/api/envoy/config/trace/v4alpha/xray.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.config.trace.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; +option java_outer_classname = "XrayProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: AWS X-Ray Tracer Configuration] +// Configuration for AWS X-Ray tracer + +message XRayConfig { + option (udpa.annotations.versioning).previous_message_type = 
"envoy.config.trace.v3.XRayConfig"; + + // The UDP endpoint of the X-Ray Daemon where the spans will be sent. + // If this value is not set, the default value of 127.0.0.1:2000 will be used. + core.v4alpha.SocketAddress daemon_endpoint = 1; + + // The name of the X-Ray segment. + string segment_name = 2 [(validate.rules).string = {min_len: 1}]; + + // The location of a local custom sampling rules JSON file. + // For an example of the sampling rules see: + // `X-Ray SDK documentation + // `_ + core.v4alpha.DataSource sampling_rule_manifest = 3; +} diff --git a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto index 0ef4d12cca9e..92d5fb83a49c 100644 --- a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto +++ b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.transport_socket.alts.v2alpha; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha"; @@ -10,6 +11,7 @@ option java_outer_classname = "AltsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.alts.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ALTS] // [#extension: envoy.transport_sockets.alts] diff --git a/api/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto b/api/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto index 027bd9a5a4b8..1b3fd395d572 100644 --- a/api/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto +++ b/api/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.transport_socket.raw_buffer.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package 
= "io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2"; option java_outer_classname = "RawBufferProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.raw_buffer.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Raw Buffer] // [#extension: envoy.transport_sockets.raw_buffer] diff --git a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto index 53add0a9f79e..0802c7558ad3 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto @@ -5,9 +5,8 @@ package envoy.config.transport_socket.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/config/common/tap/v2alpha/common.proto"; -import "udpa/annotations/status.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; @@ -16,6 +15,7 @@ option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tap.v3"; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] diff --git a/api/envoy/config/wasm/v2/wasm.proto b/api/envoy/config/wasm/v2/wasm.proto index dc62325286d7..21960a44f30d 100644 --- a/api/envoy/config/wasm/v2/wasm.proto +++ b/api/envoy/config/wasm/v2/wasm.proto @@ -5,12 +5,12 @@ package envoy.config.wasm.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; -import "validate/validate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.wasm.v2"; option java_outer_classname = "WasmProto"; option 
java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.wasm.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Wasm service] diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD b/api/envoy/config/wasm/v3/BUILD similarity index 88% rename from generated_api_shadow/envoy/extensions/wasm/v3/BUILD rename to api/envoy/config/wasm/v3/BUILD index 8182a50ccaea..2c3dad6453b6 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD +++ b/api/envoy/config/wasm/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/wasm/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/config/wasm/v3/wasm.proto similarity index 81% rename from generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto rename to api/envoy/config/wasm/v3/wasm.proto index e334072dc5aa..56dfa6fc7e2a 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ b/api/envoy/config/wasm/v3/wasm.proto @@ -1,24 +1,22 @@ syntax = "proto3"; -package envoy.extensions.wasm.v3; +package envoy.config.wasm.v3; import "envoy/config/core/v3/base.proto"; -import "udpa/annotations/versioning.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.wasm.v3"; +option java_package = "io.envoyproxy.envoy.config.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm service] // Configuration for a Wasm VM. 
// [#next-free-field: 6] message VmConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2.VmConfig"; - // An ID which will be used along with a hash of the wasm code (or null_vm_id) to determine which // VM will be used for the plugin. All plugins which use the same vm_id and code will use the same // VM. May be left blank. @@ -28,7 +26,7 @@ message VmConfig { string runtime = 2; // The Wasm code that Envoy will execute. - config.core.v3.AsyncDataSource code = 3; + core.v3.AsyncDataSource code = 3; // The Wasm configuration string used on initialization of a new VM (proxy_onStart). string configuration = 4; @@ -39,8 +37,6 @@ message VmConfig { // Base Configuration for Wasm Plugins, e.g. filters and services. message PluginConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2.PluginConfig"; - // A unique name for a filters/services in a VM for use in identifiying the filter/service if // multiple filters/services are handled by the same vm_id and root_id and for logging/debugging. string name = 1; @@ -62,8 +58,6 @@ message PluginConfig { // `. This opaque configuration will be used to // create a Wasm Service. message WasmService { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2.WasmService"; - // General plugin configuration. 
PluginConfig config = 1; diff --git a/api/envoy/data/accesslog/v2/BUILD b/api/envoy/data/accesslog/v2/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/api/envoy/data/accesslog/v2/BUILD +++ b/api/envoy/data/accesslog/v2/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/data/accesslog/v2/accesslog.proto b/api/envoy/data/accesslog/v2/accesslog.proto index 1c4e5ee13c73..af19197f62a6 100644 --- a/api/envoy/data/accesslog/v2/accesslog.proto +++ b/api/envoy/data/accesslog/v2/accesslog.proto @@ -10,11 +10,13 @@ import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.accesslog.v2"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index fb81c6360667..374569d937f2 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -10,13 +10,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.accesslog.v3"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed diff --git a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto index cec29bccc73f..3ea8bc2597fd 100644 --- a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto +++ b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto @@ -6,12 +6,14 @@ import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha"; option java_outer_classname = "OutlierDetectionEventProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.data.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. diff --git a/api/envoy/data/cluster/v3/outlier_detection_event.proto b/api/envoy/data/cluster/v3/outlier_detection_event.proto index 6e9d59b0e15d..ae1ad4c94d17 100644 --- a/api/envoy/data/cluster/v3/outlier_detection_event.proto +++ b/api/envoy/data/cluster/v3/outlier_detection_event.proto @@ -5,13 +5,14 @@ package envoy.data.cluster.v3; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.cluster.v3"; option java_outer_classname = "OutlierDetectionEventProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. 
diff --git a/api/envoy/data/core/v2alpha/BUILD b/api/envoy/data/core/v2alpha/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/api/envoy/data/core/v2alpha/BUILD +++ b/api/envoy/data/core/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/data/core/v2alpha/health_check_event.proto b/api/envoy/data/core/v2alpha/health_check_event.proto index 777cb9c270df..00fd69fd42d3 100644 --- a/api/envoy/data/core/v2alpha/health_check_event.proto +++ b/api/envoy/data/core/v2alpha/health_check_event.proto @@ -6,11 +6,13 @@ import "envoy/api/v2/core/address.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.core.v2alpha"; option java_outer_classname = "HealthCheckEventProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. 
diff --git a/api/envoy/data/core/v3/health_check_event.proto b/api/envoy/data/core/v3/health_check_event.proto index 7ad278876dcf..88b195b92b3d 100644 --- a/api/envoy/data/core/v3/health_check_event.proto +++ b/api/envoy/data/core/v3/health_check_event.proto @@ -6,13 +6,14 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.core.v3"; option java_outer_classname = "HealthCheckEventProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. diff --git a/api/envoy/config/filter/network/wasm/v2/BUILD b/api/envoy/data/dns/v2alpha/BUILD similarity index 87% rename from api/envoy/config/filter/network/wasm/v2/BUILD rename to api/envoy/data/dns/v2alpha/BUILD index 7903b3becced..702abad68ac1 100644 --- a/api/envoy/config/filter/network/wasm/v2/BUILD +++ b/api/envoy/data/dns/v2alpha/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/wasm/v2:pkg", + "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/data/dns/v2alpha/dns_table.proto b/api/envoy/data/dns/v2alpha/dns_table.proto new file mode 100644 index 000000000000..7a9e535c4f3a --- /dev/null +++ b/api/envoy/data/dns/v2alpha/dns_table.proto @@ -0,0 +1,74 @@ +syntax = "proto3"; + +package envoy.data.dns.v2alpha; + +import "envoy/type/matcher/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v2alpha"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; 
+option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. + +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + // This field contains a well formed IP address that is returned + // in the answer for a name query. The address field can be an + // IPv4 or IPv6 address. Address family detection is done automatically + // when Envoy parses the string. Since this field is repeated, + // Envoy will return one randomly chosen entry from this list in the + // DNS response. The random index will vary per query so that we prevent + // clients pinning on a single address for a configured domain + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses + // or dictate some other method for resolving the addresses for an + // endpoint + message DnsEndpoint { + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + } + } + + message DnsVirtualDomain { + // The domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address + // of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in dns answers from Envoy returned to the client + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + } + + // Control how many times envoy makes an attempt to forward a query to + // an external server + uint32 external_retry_count = 1; + + // Fully qualified domain names for which Envoy will respond to queries + repeated DnsVirtualDomain 
virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // This field serves to help Envoy determine whether it can authoritatively + // answer a query for a name matching a suffix in this list. If the query + // name does not match a suffix in this list, Envoy will forward + // the query to an upstream DNS server + repeated type.matcher.StringMatcher known_suffixes = 3; +} diff --git a/api/envoy/data/dns/v3/BUILD b/api/envoy/data/dns/v3/BUILD new file mode 100644 index 000000000000..d61d877fef2e --- /dev/null +++ b/api/envoy/data/dns/v3/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/data/dns/v2alpha:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/data/dns/v3/dns_table.proto b/api/envoy/data/dns/v3/dns_table.proto new file mode 100644 index 000000000000..a6457e118672 --- /dev/null +++ b/api/envoy/data/dns/v3/dns_table.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package envoy.data.dns.v3; + +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v3"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. 
+ +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable"; + + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v2alpha.DnsTable.AddressList"; + + // This field contains a well formed IP address that is returned + // in the answer for a name query. The address field can be an + // IPv4 or IPv6 address. Address family detection is done automatically + // when Envoy parses the string. Since this field is repeated, + // Envoy will return one randomly chosen entry from this list in the + // DNS response. The random index will vary per query so that we prevent + // clients pinning on a single address for a configured domain + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses + // or dictate some other method for resolving the addresses for an + // endpoint + message DnsEndpoint { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; + + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + } + } + + message DnsVirtualDomain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; + + // The domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address + // of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in dns answers from Envoy returned to the client + google.protobuf.Duration answer_ttl = 3 
[(validate.rules).duration = {gt {}}]; + } + + // Control how many times envoy makes an attempt to forward a query to + // an external server + uint32 external_retry_count = 1; + + // Fully qualified domain names for which Envoy will respond to queries + repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // This field serves to help Envoy determine whether it can authoritatively + // answer a query for a name matching a suffix in this list. If the query + // name does not match a suffix in this list, Envoy will forward + // the query to an upstream DNS server + repeated type.matcher.v3.StringMatcher known_suffixes = 3; +} diff --git a/api/envoy/data/tap/v2alpha/BUILD b/api/envoy/data/tap/v2alpha/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/api/envoy/data/tap/v2alpha/BUILD +++ b/api/envoy/data/tap/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/data/tap/v2alpha/common.proto b/api/envoy/data/tap/v2alpha/common.proto index 93e33a2ea8d2..7c02aa771954 100644 --- a/api/envoy/data/tap/v2alpha/common.proto +++ b/api/envoy/data/tap/v2alpha/common.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.data.tap.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap common data] diff --git a/api/envoy/data/tap/v2alpha/http.proto b/api/envoy/data/tap/v2alpha/http.proto index bde7006e1093..60ea68b66d4a 100644 --- a/api/envoy/data/tap/v2alpha/http.proto +++ b/api/envoy/data/tap/v2alpha/http.proto @@ -5,9 +5,12 @@ package 
envoy.data.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/data/tap/v2alpha/common.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP tap data] diff --git a/api/envoy/data/tap/v2alpha/transport.proto b/api/envoy/data/tap/v2alpha/transport.proto index dfc7f0780611..82c2845ee338 100644 --- a/api/envoy/data/tap/v2alpha/transport.proto +++ b/api/envoy/data/tap/v2alpha/transport.proto @@ -7,9 +7,12 @@ import "envoy/data/tap/v2alpha/common.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "TransportProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Transport tap data] // Trace format for the tap transport socket extension. 
This dumps plain text read/write diff --git a/api/envoy/data/tap/v2alpha/wrapper.proto b/api/envoy/data/tap/v2alpha/wrapper.proto index 13d39b8d3585..769b95c6160a 100644 --- a/api/envoy/data/tap/v2alpha/wrapper.proto +++ b/api/envoy/data/tap/v2alpha/wrapper.proto @@ -5,11 +5,13 @@ package envoy.data.tap.v2alpha; import "envoy/data/tap/v2alpha/http.proto"; import "envoy/data/tap/v2alpha/transport.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap data wrappers] diff --git a/api/envoy/data/tap/v3/common.proto b/api/envoy/data/tap/v3/common.proto index 85c1c39c5ee2..861da12e20c1 100644 --- a/api/envoy/data/tap/v3/common.proto +++ b/api/envoy/data/tap/v3/common.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.data.tap.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap common data] diff --git a/api/envoy/data/tap/v3/http.proto b/api/envoy/data/tap/v3/http.proto index a84bd9b10195..d4f05fa09522 100644 --- a/api/envoy/data/tap/v3/http.proto +++ b/api/envoy/data/tap/v3/http.proto @@ -5,11 +5,13 @@ package envoy.data.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP tap data] diff 
--git a/api/envoy/data/tap/v3/transport.proto b/api/envoy/data/tap/v3/transport.proto index ee5125edfe87..f596759cb490 100644 --- a/api/envoy/data/tap/v3/transport.proto +++ b/api/envoy/data/tap/v3/transport.proto @@ -7,11 +7,13 @@ import "envoy/data/tap/v3/common.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "TransportProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Transport tap data] // Trace format for the tap transport socket extension. This dumps plain text read/write diff --git a/api/envoy/data/tap/v3/wrapper.proto b/api/envoy/data/tap/v3/wrapper.proto index 3320833aab62..636547614c26 100644 --- a/api/envoy/data/tap/v3/wrapper.proto +++ b/api/envoy/data/tap/v3/wrapper.proto @@ -5,13 +5,14 @@ package envoy.data.tap.v3; import "envoy/data/tap/v3/http.proto"; import "envoy/data/tap/v3/transport.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap data wrappers] diff --git a/api/envoy/extensions/access_loggers/file/v3/file.proto b/api/envoy/extensions/access_loggers/file/v3/file.proto index 1bcf1afd9422..f3c9c0a11612 100644 --- a/api/envoy/extensions/access_loggers/file/v3/file.proto +++ b/api/envoy/extensions/access_loggers/file/v3/file.proto @@ -4,13 +4,14 @@ package envoy.extensions.access_loggers.file.v3; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.extensions.access_loggers.file.v3"; option java_outer_classname = "FileProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: File access log] // [#extension: envoy.access_loggers.file] diff --git a/api/envoy/extensions/access_loggers/grpc/v3/als.proto b/api/envoy/extensions/access_loggers/grpc/v3/als.proto index 7e059bb55cdc..3cc154416627 100644 --- a/api/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/api/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -7,13 +7,14 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.access_loggers.grpc.v3"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/api/envoy/extensions/access_loggers/wasm/v3/BUILD b/api/envoy/extensions/access_loggers/wasm/v3/BUILD index c25dbab0011c..9c848eaafb17 100644 --- a/api/envoy/extensions/access_loggers/wasm/v3/BUILD +++ b/api/envoy/extensions/access_loggers/wasm/v3/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v2:pkg", - "//envoy/extensions/wasm/v3:pkg", + "//envoy/config/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto index 19430e582090..2513a17a7fe6 100644 --- a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto +++ b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -2,17 +2,15 @@ syntax = "proto3"; package envoy.extensions.access_loggers.wasm.v3; 
-import "envoy/extensions/wasm/v3/wasm.proto"; - -import "google/protobuf/struct.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm access log] @@ -23,5 +21,5 @@ message WasmAccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.WasmAccessLog"; - envoy.extensions.wasm.v3.PluginConfig config = 1; + config.wasm.v3.PluginConfig config = 1; } diff --git a/api/envoy/extensions/clusters/aggregate/v3/cluster.proto b/api/envoy/extensions/clusters/aggregate/v3/cluster.proto index 0d00e7b444ba..aead1c451739 100644 --- a/api/envoy/extensions/clusters/aggregate/v3/cluster.proto +++ b/api/envoy/extensions/clusters/aggregate/v3/cluster.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.clusters.aggregate.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.aggregate.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Aggregate cluster configuration] diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index bbd1c833a7fd..6f100d9dbb7e 100644 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -4,13 +4,14 @@ package envoy.extensions.clusters.dynamic_forward_proxy.v3; import 
"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy cluster configuration] diff --git a/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto index 7975b2eb659f..cf01359e55ab 100644 --- a/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -5,13 +5,14 @@ package envoy.extensions.clusters.redis.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.redis.v3"; option java_outer_classname = "RedisClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Redis Cluster Configuration] // This cluster adds support for `Redis Cluster `_, as part diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 7ec4a4fcd817..7c72af35af33 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -7,13 +7,14 @@ import "envoy/config/cluster/v3/cluster.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import 
"validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3"; option java_outer_classname = "DnsCacheProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy common configuration] @@ -48,7 +49,12 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; + // + // .. note: + // + // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. + google.protobuf.Duration dns_refresh_rate = 3 + [(validate.rules).duration = {gte {nanos: 1000000}}]; // The TTL for hosts that are unused. Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. diff --git a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 1410e24b12a4..187ae3f229c4 100644 --- a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.common.ratelimit.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.common.ratelimit.v3"; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common rate limit components] diff --git a/api/envoy/extensions/common/tap/v3/common.proto b/api/envoy/extensions/common/tap/v3/common.proto index 6e951c5e475d..46a25b164d67 100644 --- a/api/envoy/extensions/common/tap/v3/common.proto +++ b/api/envoy/extensions/common/tap/v3/common.proto 
@@ -5,13 +5,14 @@ package envoy.extensions.common.tap.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.common.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common tap extension configuration] diff --git a/api/envoy/extensions/common/tap/v4alpha/BUILD b/api/envoy/extensions/common/tap/v4alpha/BUILD new file mode 100644 index 000000000000..d1fe49142a8e --- /dev/null +++ b/api/envoy/extensions/common/tap/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "//envoy/extensions/common/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto new file mode 100644 index 000000000000..63de14a3d6f6 --- /dev/null +++ b/api/envoy/extensions/common/tap/v4alpha/common.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.extensions.common.tap.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/tap/v3/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.common.tap.v4alpha"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// 
[#protodoc-title: Common tap extension configuration] + +// Common configuration for all tap extensions. +message CommonExtensionConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.common.tap.v3.CommonExtensionConfig"; + + // [#not-implemented-hide:] + message TapDSConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig"; + + // Configuration for the source of TapDS updates for this Cluster. + config.core.v4alpha.ConfigSource config_source = 1 + [(validate.rules).message = {required: true}]; + + // Tap config to request from XDS server. + string name = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + oneof config_type { + option (validate.required) = true; + + // If specified, the tap filter will be configured via an admin handler. + AdminConfig admin_config = 1; + + // If specified, the tap filter will be configured via a static configuration that cannot be + // changed. + config.tap.v3.TapConfig static_config = 2; + + // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. + TapDSConfig tapds_config = 3; + } +} + +// Configuration for the admin handler. See :ref:`here ` for +// more information. +message AdminConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.common.tap.v3.AdminConfig"; + + // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is + // matched to the configured filter opaque ID to determine which filter to configure. + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; +} diff --git a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD b/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD new file mode 100644 index 000000000000..d011b4d830ad --- /dev/null +++ b/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. 
This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", + "//envoy/data/dns/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto new file mode 100644 index 000000000000..38a8872d323e --- /dev/null +++ b/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.extensions.filter.udp.dns_filter.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/data/dns/v3/dns_table.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filter.udp.dns_filter.v3alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig"; + + // This message contains the configuration for the Dns Filter operating + // in a server context. 
This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig.ServerContextConfig"; + + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v3.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v3.DataSource external_dns_table = 2; + } + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration + ServerContextConfig server_config = 2; +} diff --git a/api/envoy/extensions/filters/common/fault/v3/fault.proto b/api/envoy/extensions/filters/common/fault/v3/fault.proto index 9976e17ce718..dc4f2c34efb4 100644 --- a/api/envoy/extensions/filters/common/fault/v3/fault.proto +++ b/api/envoy/extensions/filters/common/fault/v3/fault.proto @@ -6,14 +6,15 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.common.fault.v3"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common fault injection types] diff --git a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 
e806c9761138..3d2ef3e96d96 100644 --- a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -9,13 +9,14 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3"; option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview diff --git a/api/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto b/api/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto index f41639d9b5b1..b4b9cc398f2e 100644 --- a/api/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto +++ b/api/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.aws_lambda.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3"; option java_outer_classname = "AwsLambdaProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS Lambda] // AWS Lambda :ref:`configuration overview `. @@ -19,6 +20,17 @@ message Config { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.aws_lambda.v2alpha.Config"; + enum InvocationMode { + // This is the more common mode of invocation, in which Lambda responds after it has completed the function. 
In + // this mode the output of the Lambda function becomes the response of the HTTP request. + SYNCHRONOUS = 0; + + // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be + // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the + // call which is translated to an HTTP 200 OK by the filter. + ASYNCHRONOUS = 1; + } + // The ARN of the AWS Lambda to invoke when the filter is engaged // Must be in the following format: // arn::lambda:::function: @@ -26,6 +38,9 @@ message Config { // Whether to transform the request (headers and body) to a JSON payload or pass it as is. bool payload_passthrough = 2; + + // Determines the way to invoke the Lambda function. + InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; } // Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different diff --git a/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto index e46ef3170262..b80bc1b82108 100644 --- a/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ b/api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.aws_request_signing.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3"; option java_outer_classname = "AwsRequestSigningProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AwsRequestSigning] // AwsRequestSigning :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/buffer/v3/buffer.proto b/api/envoy/extensions/filters/http/buffer/v3/buffer.proto index 59ffa83ac3f2..6f73244032c4 100644 --- a/api/envoy/extensions/filters/http/buffer/v3/buffer.proto +++ b/api/envoy/extensions/filters/http/buffer/v3/buffer.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.buffer.v3; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.buffer.v3"; option java_outer_classname = "BufferProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Buffer] // Buffer :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto index 26016442bb0a..1ff305bb0e27 100644 --- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -9,13 +9,13 @@ import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha"; option java_outer_classname = "CacheProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Cache Filter] // [#extension: envoy.filters.http.cache] diff --git a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto index 7d506a2152af..0eefe55140d2 100644 --- a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ 
b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -6,11 +6,13 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; option java_outer_classname = "CompressorProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Compressor] diff --git a/api/envoy/extensions/filters/http/cors/v3/cors.proto b/api/envoy/extensions/filters/http/cors/v3/cors.proto index fd41e76449ae..0269e1bdfd8c 100644 --- a/api/envoy/extensions/filters/http/cors/v3/cors.proto +++ b/api/envoy/extensions/filters/http/cors/v3/cors.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.cors.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.cors.v3"; option java_outer_classname = "CorsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cors] // CORS Filter :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/csrf/v3/csrf.proto b/api/envoy/extensions/filters/http/csrf/v3/csrf.proto index 7748abf88d36..263d705e3f54 100644 --- a/api/envoy/extensions/filters/http/csrf/v3/csrf.proto +++ b/api/envoy/extensions/filters/http/csrf/v3/csrf.proto @@ -5,13 +5,14 @@ package envoy.extensions.filters.http.csrf.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/matcher/v3/string.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v3"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto index 35e8a608d473..b8a2525dbf54 100644 --- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.dynamic_forward_proxy.v3; import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3"; option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy] diff --git a/api/envoy/extensions/filters/http/dynamo/v3/dynamo.proto b/api/envoy/extensions/filters/http/dynamo/v3/dynamo.proto index 
baf977e00334..13a4f1c6ceee 100644 --- a/api/envoy/extensions/filters/http/dynamo/v3/dynamo.proto +++ b/api/envoy/extensions/filters/http/dynamo/v3/dynamo.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.dynamo.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamo.v3"; option java_outer_classname = "DynamoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamo] // Dynamo :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 747378d986c8..44673ad6ff26 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -8,14 +8,15 @@ import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/fault/v3/fault.proto b/api/envoy/extensions/filters/http/fault/v3/fault.proto index 6127ca848460..534a0da35b16 100644 --- a/api/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/api/envoy/extensions/filters/http/fault/v3/fault.proto @@ -8,13 +8,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v3"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto b/api/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto index 3835db1c0584..7e31da49e92b 100644 --- a/api/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto +++ b/api/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_http1_bridge.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC HTTP/1.1 Bridge] // gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto index 88a09075d6f4..85d7cbe1cecd 100644 --- a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] // gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview diff --git a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto index 5da637cefeb2..3082089202ee 100644 --- a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_json_transcoder.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3"; option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/grpc_stats/v3/BUILD b/api/envoy/extensions/filters/http/grpc_stats/v3/BUILD index 6416ce6b09a5..cfae56e4cac3 100644 --- a/api/envoy/extensions/filters/http/grpc_stats/v3/BUILD +++ b/api/envoy/extensions/filters/http/grpc_stats/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/filters/http/grpc_stats/v3/config.proto b/api/envoy/extensions/filters/http/grpc_stats/v3/config.proto index bc3574562b32..1fecdaea0a16 100644 --- a/api/envoy/extensions/filters/http/grpc_stats/v3/config.proto +++ b/api/envoy/extensions/filters/http/grpc_stats/v3/config.proto @@ -2,13 +2,18 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_stats.v3; -import "udpa/annotations/versioning.proto"; +import "envoy/config/core/v3/grpc_method_list.proto"; + +import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. @@ -22,6 +27,33 @@ message FilterConfig { // If true, the filter maintains a filter state object with the request and response message // counts. bool emit_filter_state = 1; + + oneof per_method_stat_specifier { + // If set, specifies an allowlist of service/methods that will have individual stats + // emitted for them. Any call that does not match the allowlist will be counted + // in a stat with no method specifier: `cluster..grpc.*`. 
+ config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; + + // If set to true, emit stats for all service/method names. + // + // If set to false, emit stats for all service/message types to the same stats without including + // the service/method in the name, with prefix `cluster..grpc`. This can be useful if + // service/method granularity is not needed, or if each cluster only receives a single method. + // + // .. attention:: + // This option is only safe if all clients are trusted. If this option is enabled + // with untrusted clients, the clients could cause unbounded growth in the number of stats in + // Envoy, using unbounded memory and potentially slowing down stats pipelines. + // + // .. attention:: + // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the + // behavior will default to `stats_for_all_methods=true`. This default value is deprecated, + // and in a future release, if neither field is set, it will default to + // `stats_for_all_methods=false` in order to be safe by default. This behavior can be + // controlled with runtime override + // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. + google.protobuf.BoolValue stats_for_all_methods = 3; + } } // gRPC statistics filter state object in protobuf form. 
diff --git a/api/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto b/api/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto index 2c881a78d56e..8161139f547b 100644 --- a/api/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto +++ b/api/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_web.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3"; option java_outer_classname = "GrpcWebProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Web] // gRPC Web :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/gzip/v3/gzip.proto b/api/envoy/extensions/filters/http/gzip/v3/gzip.proto index fa56aa9fab7b..eb8a69f083ba 100644 --- a/api/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/api/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -6,13 +6,14 @@ import "envoy/extensions/filters/http/compressor/v3/compressor.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v3"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Gzip] // Gzip :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 249c61298087..8e7c490f01b6 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3"; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Header-To-Metadata Filter] // @@ -77,7 +78,8 @@ message Config { "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; // The header that triggers this rule — required. - string header = 1 [(validate.rules).string = {min_bytes: 1}]; + string header = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If the header is present, apply this metadata KeyValuePair. 
// diff --git a/api/envoy/extensions/filters/http/health_check/v3/health_check.proto b/api/envoy/extensions/filters/http/health_check/v3/health_check.proto index 87aac0de0232..1a5dbf1bb900 100644 --- a/api/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/api/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -8,13 +8,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v3"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto b/api/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto index 9f77f7ba763b..a23ad9dea0a9 100644 --- a/api/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto +++ b/api/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.ip_tagging.v3; import "envoy/config/core/v3/address.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3"; option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 577e857c0e49..1aabe1bd4390 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -9,13 +9,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/lua/v3/lua.proto b/api/envoy/extensions/filters/http/lua/v3/lua.proto index a05081402d75..da6b0c09a0f6 100644 --- a/api/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/api/envoy/extensions/filters/http/lua/v3/lua.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.lua.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.lua.v3"; option java_outer_classname = "LuaProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Lua] // Lua :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/on_demand/v3/on_demand.proto b/api/envoy/extensions/filters/http/on_demand/v3/on_demand.proto index 81c169489c53..5c6b96540c19 100644 --- a/api/envoy/extensions/filters/http/on_demand/v3/on_demand.proto +++ b/api/envoy/extensions/filters/http/on_demand/v3/on_demand.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.on_demand.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.on_demand.v3"; option java_outer_classname = "OnDemandProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OnDemand] // IP tagging :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/original_src/v3/original_src.proto b/api/envoy/extensions/filters/http/original_src/v3/original_src.proto index 33d122f2d6a9..507c9728fbbf 100644 --- a/api/envoy/extensions/filters/http/original_src/v3/original_src.proto +++ b/api/envoy/extensions/filters/http/original_src/v3/original_src.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.original_src.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.original_src.v3"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. 
diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index fda94812818e..057b7c3d4403 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -6,13 +6,14 @@ import "envoy/config/ratelimit/v3/rls.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/rbac/v3/rbac.proto b/api/envoy/extensions/filters/http/rbac/v3/rbac.proto index 6d5ec317e970..bae67ec5a0a9 100644 --- a/api/envoy/extensions/filters/http/rbac/v3/rbac.proto +++ b/api/envoy/extensions/filters/http/rbac/v3/rbac.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.rbac.v3; import "envoy/config/rbac/v3/rbac.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/rbac/v4alpha/BUILD b/api/envoy/extensions/filters/http/rbac/v4alpha/BUILD new file mode 100644 index 000000000000..bd16c3f2a0d6 --- /dev/null +++ b/api/envoy/extensions/filters/http/rbac/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. 
This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/rbac/v4alpha:pkg", + "//envoy/extensions/filters/http/rbac/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto b/api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto new file mode 100644 index 000000000000..ec65f5d7bcb6 --- /dev/null +++ b/api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.rbac.v4alpha; + +import "envoy/config/rbac/v4alpha/rbac.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha"; +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RBAC] +// Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.http.rbac] + +// RBAC filter config. +message RBAC { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.rbac.v3.RBAC"; + + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + config.rbac.v4alpha.RBAC rules = 1; + + // Shadow rules are not enforced by the filter (i.e., returning a 403) + // but will emit stats and logs and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. 
+ config.rbac.v4alpha.RBAC shadow_rules = 2; +} + +message RBACPerRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.rbac.v3.RBACPerRoute"; + + reserved 1; + + // Override the global configuration of the filter with this new config. + // If absent, the global RBAC policy will be disabled for this route. + RBAC rbac = 2; +} diff --git a/api/envoy/extensions/filters/http/router/v3/router.proto b/api/envoy/extensions/filters/http/router/v3/router.proto index f26d86630623..6ab64f92f2b0 100644 --- a/api/envoy/extensions/filters/http/router/v3/router.proto +++ b/api/envoy/extensions/filters/http/router/v3/router.proto @@ -6,13 +6,14 @@ import "envoy/config/accesslog/v3/accesslog.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v3"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Router :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/http/squash/v3/squash.proto b/api/envoy/extensions/filters/http/squash/v3/squash.proto index 9222b786b0c1..0ea335a414fa 100644 --- a/api/envoy/extensions/filters/http/squash/v3/squash.proto +++ b/api/envoy/extensions/filters/http/squash/v3/squash.proto @@ -5,13 +5,14 @@ package envoy.extensions.filters.http.squash.v3; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.squash.v3"; option java_outer_classname = "SquashProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Squash] // Squash :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/tap/v3/tap.proto b/api/envoy/extensions/filters/http/tap/v3/tap.proto index 1f92e910b862..81779443e4a5 100644 --- a/api/envoy/extensions/filters/http/tap/v3/tap.proto +++ b/api/envoy/extensions/filters/http/tap/v3/tap.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.tap.v3; import "envoy/extensions/common/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/http/tap/v4alpha/BUILD b/api/envoy/extensions/filters/http/tap/v4alpha/BUILD new file mode 100644 index 000000000000..5204b739b76c --- /dev/null +++ b/api/envoy/extensions/filters/http/tap/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/common/tap/v4alpha:pkg", + "//envoy/extensions/filters/http/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/tap/v4alpha/tap.proto b/api/envoy/extensions/filters/http/tap/v4alpha/tap.proto new file mode 100644 index 000000000000..98798be8bfd2 --- /dev/null +++ b/api/envoy/extensions/filters/http/tap/v4alpha/tap.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.tap.v4alpha; + +import "envoy/extensions/common/tap/v4alpha/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap] +// Tap :ref:`configuration overview `. +// [#extension: envoy.filters.http.tap] + +// Top level configuration for the tap filter. +message Tap { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.tap.v3.Tap"; + + // Common configuration for the HTTP tap filter. 
+ common.tap.v4alpha.CommonExtensionConfig common_config = 1 + [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/filters/http/wasm/v3/BUILD b/api/envoy/extensions/filters/http/wasm/v3/BUILD index 6d6a05070f89..31c49afb4c5c 100644 --- a/api/envoy/extensions/filters/http/wasm/v3/BUILD +++ b/api/envoy/extensions/filters/http/wasm/v3/BUILD @@ -6,8 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/http/wasm/v2:pkg", - "//envoy/extensions/wasm/v3:pkg", + "//envoy/config/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto index ff23cb3c31a6..a8e583c921e2 100644 --- a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto +++ b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -2,23 +2,21 @@ syntax = "proto3"; package envoy.extensions.filters.http.wasm.v3; -import "envoy/extensions/wasm/v3/wasm.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] // Wasm :ref:`configuration overview `. message Wasm { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.wasm.v2.Wasm"; - // General Plugin configuration. 
- envoy.extensions.wasm.v3.PluginConfig config = 1; + config.wasm.v3.PluginConfig config = 1; } diff --git a/api/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto b/api/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto index 869eef571235..cb439b0973ba 100644 --- a/api/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto +++ b/api/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.http_inspector.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3"; option java_outer_classname = "HttpInspectorProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Inspector Filter] // Detect whether the application protocol is HTTP. diff --git a/api/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto b/api/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto index 962306b0a4fc..8239c5c42c52 100644 --- a/api/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto +++ b/api/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.original_dst.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3"; option java_outer_classname = "OriginalDstProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Dst Filter] // Use the Original destination address on downstream connections. 
diff --git a/api/envoy/extensions/filters/listener/original_src/v3/original_src.proto b/api/envoy/extensions/filters/listener/original_src/v3/original_src.proto index f35bd821582d..5fd07924d7fd 100644 --- a/api/envoy/extensions/filters/listener/original_src/v3/original_src.proto +++ b/api/envoy/extensions/filters/listener/original_src/v3/original_src.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.listener.original_src.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_src.v3"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. diff --git a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 67eff4341ef7..63ad72945e28 100644 --- a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.proxy_protocol.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Proxy Protocol Filter] // PROXY protocol listener filter. 
diff --git a/api/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto b/api/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto index bf6fa7224b63..eff9774844f4 100644 --- a/api/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto +++ b/api/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.tls_inspector.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3"; option java_outer_classname = "TlsInspectorProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TLS Inspector Filter] // Allows detecting whether the transport appears to be TLS or plaintext. diff --git a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index 175793d68b7a..e2da157574f8 100644 --- a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -6,13 +6,14 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3"; option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client TLS authentication] // Client TLS authentication diff --git a/api/envoy/extensions/filters/network/direct_response/v3/config.proto b/api/envoy/extensions/filters/network/direct_response/v3/config.proto 
index fba1384f3c43..2742372b2f91 100644 --- a/api/envoy/extensions/filters/network/direct_response/v3/config.proto +++ b/api/envoy/extensions/filters/network/direct_response/v3/config.proto @@ -4,11 +4,13 @@ package envoy.extensions.filters.network.direct_response.v3; import "envoy/config/core/v3/base.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.direct_response.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Direct response] // Direct response :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto b/api/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto index 7ba81d8596f8..fa1959a425c8 100644 --- a/api/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto +++ b/api/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.network.dubbo_proxy.router.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Dubbo router :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto index 089d79868158..749708880d71 100644 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto @@ -6,13 +6,14 @@ import "envoy/extensions/filters/network/dubbo_proxy/v3/route.proto"; import "google/protobuf/any.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto index 63e3c13a2987..f06518c0b672 100644 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto @@ -6,13 +6,14 @@ import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/range.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/echo/v3/echo.proto b/api/envoy/extensions/filters/network/echo/v3/echo.proto index edbc79a1ab25..077d87259b6b 100644 --- a/api/envoy/extensions/filters/network/echo/v3/echo.proto +++ b/api/envoy/extensions/filters/network/echo/v3/echo.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.network.echo.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.echo.v3"; option java_outer_classname = "EchoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Echo] // Echo :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index 82e5b8145db6..c3a63ac0a4f6 100644 --- a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.ext_authz.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index c3b4375e25b0..06d66055a0d6 100644 --- 
a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -16,20 +16,21 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3"; option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 36] +// [#next-free-field: 37] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -325,7 +326,7 @@ message HttpConnectionManager { // timeout, although per-route idle timeout overrides will continue to apply. google.protobuf.Duration stream_idle_timeout = 24; - // A timeout for idle requests managed by the connection manager. + // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. @@ -482,6 +483,18 @@ message HttpConnectionManager { // with `prefix` match set to `/dir`. Defaults to `false`. 
Note that slash merging is not part of // `HTTP spec ` and is provided for convenience. bool merge_slashes = 33; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + RequestIDExtension request_id_extension = 36; } message Rds { @@ -661,3 +674,11 @@ message HttpFilter { google.protobuf.Any typed_config = 4; } } + +message RequestIDExtension { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension"; + + // Request ID extension specific configuration. + google.protobuf.Any typed_config = 1; +} diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD new file mode 100644 index 000000000000..792ccf7ab677 --- /dev/null +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -0,0 +1,19 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/config/trace/v4alpha:pkg", + "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", + "//envoy/type/tracing/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto new file mode 100644 index 000000000000..226dc2727fc5 --- /dev/null +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -0,0 +1,685 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.http_connection_manager.v4alpha; + +import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/route/v4alpha/route.proto"; +import "envoy/config/route/v4alpha/scoped_route.proto"; +import "envoy/config/trace/v4alpha/trace.proto"; +import "envoy/type/tracing/v3/custom_tag.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha"; +option java_outer_classname = "HttpConnectionManagerProto"; +option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP connection manager] +// HTTP connection manager :ref:`configuration overview `. +// [#extension: envoy.filters.network.http_connection_manager] + +// [#next-free-field: 37] +message HttpConnectionManager { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; + + enum CodecType { + // For every new connection, the connection manager will determine which + // codec to use. This mode supports both ALPN for TLS listeners as well as + // protocol inference for plaintext listeners. If ALPN data is available, it + // is preferred, otherwise protocol inference is used. In almost all cases, + // this is the right option to choose for this setting. + AUTO = 0; + + // The connection manager will assume that the client is speaking HTTP/1.1. + HTTP1 = 1; + + // The connection manager will assume that the client is speaking HTTP/2 + // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + // Prior knowledge is allowed). + HTTP2 = 2; + + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + HTTP3 = 3; + } + + enum ServerHeaderTransformation { + // Overwrite any Server header with the contents of server_name. + OVERWRITE = 0; + + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; + + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. 
+ enum ForwardClientCertDetails { + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; + + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; + + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; + + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; + + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + ALWAYS_FORWARD_ONLY = 4; + } + + // [#next-free-field: 10] + message Tracing { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing"; + + enum OperationName { + // The HTTP listener is used for ingress/incoming requests. + INGRESS = 0; + + // The HTTP listener is used for egress/outgoing requests. + EGRESS = 1; + } + + reserved 1, 2; + + reserved "operation_name", "request_headers_for_tags"; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + type.v3.Percent client_sampling = 3; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. 
+ // Default: 100% + type.v3.Percent random_sampling = 4; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.Percent overall_sampling = 5; + + // Whether to annotate spans with additional data. If true, spans will include logs for stream + // events. + bool verbose = 6; + + // Maximum length of the request path to extract and include in the HttpUrl tag. Used to + // truncate lengthy request paths to meet the needs of a tracing backend. + // Default: 256 + google.protobuf.UInt32Value max_path_tag_length = 7; + + // A list of custom tags with unique tag name to create tags for the active span. + repeated type.tracing.v3.CustomTag custom_tags = 8; + + // Configuration for an external tracing provider. + // If not specified, Envoy will fall back to using tracing provider configuration + // from the bootstrap config. + // [#not-implemented-hide:] + config.trace.v4alpha.Tracing.Http provider = 9; + } + + message InternalAddressConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." + "InternalAddressConfig"; + + // Whether unix socket addresses should be considered internal. 
+ bool unix_sockets = 1; + } + + // [#next-free-field: 7] + message SetCurrentClientCertDetails { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." + "SetCurrentClientCertDetails"; + + reserved 2; + + // Whether to forward the subject of the client cert. Defaults to false. + google.protobuf.BoolValue subject = 1; + + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + bool chain = 6; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + } + + // The configuration for HTTP upgrades. + // For each upgrade type desired, an UpgradeConfig must be added. + // + // .. warning:: + // + // The current implementation of upgrade headers does not handle + // multi-valued upgrade headers. Support for multi-valued headers may be + // added in the future if needed. + // + // .. warning:: + // The current implementation of upgrade headers does not work with HTTP/2 + // upstreams. + message UpgradeConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." + "UpgradeConfig"; + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. 
+ string upgrade_type = 1; + + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + repeated HttpFilter filters = 2; + + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + google.protobuf.BoolValue enabled = 3; + } + + reserved 27, 11; + + reserved "idle_timeout"; + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + config.route.v4alpha.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } + + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. 
See the linked + // documentation for more information. Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + + // Presence of the object defines whether the connection manager + // emits :ref:`tracing ` data to the :ref:`configured tracing provider + // `. + Tracing tracing = 7; + + // Additional settings for HTTP requests handled by the connection manager. These will be + // applicable to both HTTP1 and HTTP2 requests. + config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35; + + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. + config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; + + // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9; + + // An optional override that the connection manager will write to the server + // header in responses. If not set, the default is *envoy*. + string server_name = 10; + + // Defines the action to be applied to the Server header on the response path. + // By default, Envoy will overwrite the header with the value specified in + // server_name. + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum = {defined_only: true}]; + + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + // The max configurable limit is 96 KiB, based on current implementation + // constraints. + google.protobuf.UInt32Value max_request_headers_kb = 29 + [(validate.rules).uint32 = {lte: 96 gt: 0}]; + + // The stream idle timeout for connections managed by the connection manager. + // If not specified, this defaults to 5 minutes. 
The default value was selected + // so as not to interfere with any smaller configured timeouts that may have + // existed in configurations prior to the introduction of this feature, while + // introducing robustness to TCP connections that terminate without a FIN. + // + // This idle timeout applies to new streams and is overridable by the + // :ref:`route-level idle_timeout + // `. Even on a stream in + // which the override applies, prior to receipt of the initial request + // headers, the :ref:`stream_idle_timeout + // ` + // applies. Each time an encode/decode event for headers or data is processed + // for the stream, the timer will be reset. If the timeout fires, the stream + // is terminated with a 408 Request Timeout error code if no upstream response + // header has been received, otherwise a stream reset occurs. + // + // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + // to the granularity of events presented to the connection manager. For example, while receiving + // very large request headers, it may be the case that there is traffic regularly arriving on the + // wire while the connection manage is only able to observe the end-of-headers event, hence the + // stream may still idle timeout. + // + // A value of 0 will completely disable the connection manager stream idle + // timeout, although per-route idle timeout overrides will continue to apply. + google.protobuf.Duration stream_idle_timeout = 24; + + // The amount of time that Envoy will wait for the entire request to be received. + // The timer is activated when the request is initiated, and is disarmed when the last byte of the + // request is sent upstream (i.e. all decoding filters have processed the request), OR when the + // response is initiated. If not specified or set to 0, this timeout is disabled. 
+ google.protobuf.Duration request_timeout = 28; + + // The time that Envoy will wait between sending an HTTP/2 “shutdown + notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + // This is used so that Envoy provides a grace period for new streams that + // race with the final GOAWAY frame. During this grace period, Envoy will + // continue to accept new streams. After the grace period, a final GOAWAY + // frame is sent and Envoy will start refusing new streams. Draining occurs + // both when a connection hits the idle timeout or during general server + // draining. The default grace period is 5000 milliseconds (5 seconds) if this + // option is not specified. + google.protobuf.Duration drain_timeout = 12; + + // The delayed close timeout is for downstream connections managed by the HTTP connection manager. + // It is defined as a grace period after connection close processing has been locally initiated + // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. + // + // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + // sequence mitigates a race condition that exists when downstream clients do not drain/process + // data in a connection's receive buffer after a remote close has been detected via a socket + // write(). This race leads to such clients failing to process the response code sent by Envoy, + // which could result in erroneous downstream processing. 
+ // + // If the timeout triggers, Envoy will close the connection's socket. + // + // The default timeout is 1000 ms if this option is not specified. + // + // .. NOTE:: + // To be useful in avoiding the race condition described above, this timeout must be set + // to *at least* +<100ms to account for + // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + // A value of 0 will completely disable delayed close processing. When disabled, the downstream + // connection's socket will be closed immediately after the write flush is completed or will + // never close if the write flush does not complete. + google.protobuf.Duration delayed_close_timeout = 26; + + // Configuration for :ref:`HTTP access logs ` + // emitted by the connection manager. + repeated config.accesslog.v3.AccessLog access_log = 13; + + // If set to true, the connection manager will use the real remote address + // of the client connection when determining internal versus external origin and manipulating + // various headers. If set to false or absent, the connection manager will use the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for`, + // :ref:`config_http_conn_man_headers_x-envoy-internal`, and + // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + google.protobuf.BoolValue use_remote_address = 14; + + // The number of additional ingress proxy hops from the right side of the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + // determining the origin client's IP address. The default is zero if this option + // is not specified. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. 
+ uint32 xff_num_trusted_hops = 19; + + // Configures what network addresses are considered internal for stats and header sanitation + // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. + // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information about internal/external addresses. + InternalAddressConfig internal_address_config = 25; + + // If set, Envoy will not append the remote address to the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + // has mutated the request headers. While :ref:`use_remote_address + // ` + // will also suppress XFF addition, it has consequences for logging and other + // Envoy uses of the remote address, so *skip_xff_append* should be used + // when only an elision of XFF addition is intended. + bool skip_xff_append = 21; + + // Via header value to append to request and response headers. If this is + // empty, no via header will be appended. + string via = 22; + + // Whether the connection manager will generate the :ref:`x-request-id + // ` header if it does not exist. This defaults to + // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature + // is not desired it can be disabled. + google.protobuf.BoolValue generate_request_id = 15; + + // Whether the connection manager will keep the :ref:`x-request-id + // ` header if passed for a request that is edge + // (Edge request is the request from external clients to front Envoy) and not reset it, which + // is the current Envoy behaviour. This defaults to false. + bool preserve_external_request_id = 32; + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. 
+ ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum = {defined_only: true}]; + + // This field is valid only when :ref:`forward_client_cert_details + // ` + // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + // the client certificate to be forwarded. Note that in the + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and + // *By* is always set when the client certificate presents the URI type Subject Alternative Name + // value. + SetCurrentClientCertDetails set_current_client_cert_details = 17; + + // If proxy_100_continue is true, Envoy will proxy incoming "Expect: + // 100-continue" headers upstream, and forward "100 Continue" responses + // downstream. If this is false or not set, Envoy will instead strip the + // "Expect: 100-continue" header, and send a "100 Continue" response itself. + bool proxy_100_continue = 18; + + // If + // :ref:`use_remote_address + // ` + // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. + // This is useful for testing compatibility of upstream services that parse the header value. For + // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + // `_ for details. This will also affect the + // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + // ` for runtime + // control. + // [#not-implemented-hide:] + bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; + + repeated UpgradeConfig upgrade_configs = 23; + + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream *:path* header + // as well. 
For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison ` + // for details of normalization. + // Note that Envoy does not perform + // `case normalization ` + google.protobuf.BoolValue normalize_path = 30; + + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without + // setting this option, incoming requests with path `//dir///file` will not match against route + // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // `HTTP spec ` and is provided for convenience. + bool merge_slashes = 33; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + RequestIDExtension request_id_extension = 36; +} + +message Rds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.Rds"; + + // Configuration source specifier for RDS. + config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; + + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. 
+ string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// This message is used to work around the limitations with 'oneof' and repeated fields. +message ScopedRouteConfigurationsList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList"; + + repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated = {min_items: 1}]; +} + +// [#next-free-field: 6] +message ScopedRoutes { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes"; + + // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These + // keys are matched against a set of :ref:`Key` + // objects assembled from :ref:`ScopedRouteConfiguration` + // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via + // :ref:`scoped_route_configurations_list`. + // + // Upon receiving a request's headers, the Router will build a key using the algorithm specified + // by this message. This key will be used to look up the routing table (i.e., the + // :ref:`RouteConfiguration`) to use for the request. + message ScopeKeyBuilder { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder"; + + // Specifies the mechanism for constructing key fragments which are composed into scope keys. + message FragmentBuilder { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." + "ScopeKeyBuilder.FragmentBuilder"; + + // Specifies how the value of a header should be extracted. + // The following example maps the structure of a header to the fields in this message. + // + // .. 
code:: + // + // <0> <1> <-- index + // X-Header: a=b;c=d + // | || | + // | || \----> + // | || + // | |\----> + // | | + // | \----> + // | + // \----> + // + // Each 'a=b' key-value pair constitutes an 'element' of the header field. + message HeaderValueExtractor { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." + "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor"; + + // Specifies a header field's key value pair to match on. + message KvElement { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." + "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement"; + + // The separator between key and value (e.g., '=' separates 'k=v;...'). + // If an element is an empty string, the element is ignored. + // If an element contains no separator, the whole element is parsed as key and the + // fragment value is an empty string. + // If there are multiple values for a matched key, the first value is returned. + string separator = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The key to match on. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. + string element_separator = 2; + + oneof extract_type { + // Specifies the zero based index of the element to extract. + // Note Envoy concatenates multiple values of the same header key into a comma separated + // string, the splitting always happens after the concatenation. 
+ uint32 index = 3; + + // Specifies the key value pair to extract the value from. + KvElement element = 4; + } + } + + oneof type { + option (validate.required) = true; + + // Specifies how a header field's value should be extracted. + HeaderValueExtractor header_value_extractor = 1; + } + } + + // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the + // fragments of a :ref:`ScopedRouteConfiguration`. + // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The algorithm to use for constructing a scope key for each request. + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; + + // Configuration source specifier for RDS. + // This config source is used to subscribe to RouteConfiguration resources specified in + // ScopedRouteConfiguration messages. + config.core.v4alpha.ConfigSource rds_config_source = 3 + [(validate.rules).message = {required: true}]; + + oneof config_specifier { + option (validate.required) = true; + + // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by + // matching a key constructed from the request's attributes according to the algorithm specified + // by the + // :ref:`ScopeKeyBuilder` + // in this message. + ScopedRouteConfigurationsList scoped_route_configurations_list = 4; + + // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS + // API. A scope is assigned to a request by matching a key constructed from the request's + // attributes according to the algorithm specified by the + // :ref:`ScopeKeyBuilder` + // in this message. 
+ ScopedRds scoped_rds = 5; + } +} + +message ScopedRds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds"; + + // Configuration source specifier for scoped RDS. + config.core.v4alpha.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message = {required: true}]; +} + +message HttpFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +message RequestIDExtension { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension"; + + // Request ID extension specific configuration. 
+ google.protobuf.Any typed_config = 1; +} diff --git a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto index 145866a4cb49..497e688f4c3d 100644 --- a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ b/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.network.kafka_broker.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3"; option java_outer_classname = "KafkaBrokerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto index 6eb3e141b6e9..027bc0e3fc98 100644 --- a/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto +++ b/api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto @@ -5,13 +5,14 @@ package envoy.extensions.filters.network.local_ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/token_bucket.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3"; option java_outer_classname = "LocalRateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Local rate limit] // Local rate limit :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto index d6faf97b112f..7bd17600d145 100644 --- a/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto +++ b/api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.mongo_proxy.v3; import "envoy/extensions/filters/common/fault/v3/fault.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3"; option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto index 08058740d609..663449b27035 100644 --- a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto +++ b/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.network.mysql_proxy.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3"; option java_outer_classname = "MysqlProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: MySQL proxy] // MySQL Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto index ef88a4eefc17..b92d3cee2541 100644 --- a/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto @@ -7,13 +7,14 @@ import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/rbac/v3/rbac.proto b/api/envoy/extensions/filters/network/rbac/v3/rbac.proto index e5e0022a9230..e62f7b4c419e 100644 --- a/api/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ b/api/envoy/extensions/filters/network/rbac/v3/rbac.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.rbac.v3; import "envoy/config/rbac/v3/rbac.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/rbac/v4alpha/BUILD b/api/envoy/extensions/filters/network/rbac/v4alpha/BUILD new file mode 100644 index 000000000000..25620c85c513 --- /dev/null +++ b/api/envoy/extensions/filters/network/rbac/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/rbac/v4alpha:pkg", + "//envoy/extensions/filters/network/rbac/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto new file mode 100644 index 000000000000..8452a89822c1 --- /dev/null +++ b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rbac.v4alpha; + +import "envoy/config/rbac/v4alpha/rbac.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha"; +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RBAC] +// Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.network.rbac] + +// RBAC network filter config. +// +// Header should not be used in rules/shadow_rules in RBAC network filter as +// this information is only available in :ref:`RBAC http filter `. +message RBAC { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rbac.v3.RBAC"; + + enum EnforcementType { + // Apply RBAC policies when the first byte of data arrives on the connection. 
+ ONE_TIME_ON_FIRST_BYTE = 0; + + // Continuously apply RBAC policies as data arrives. Use this mode when + // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, + // etc. when the protocol decoders emit dynamic metadata such as the + // resources being accessed and the operations on the resources. + CONTINUOUS = 1; + } + + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + config.rbac.v4alpha.RBAC rules = 1; + + // Shadow rules are not enforced by the filter but will emit stats and logs + // and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + config.rbac.v4alpha.RBAC shadow_rules = 2; + + // The prefix to use when emitting statistics. + string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // RBAC enforcement strategy. By default RBAC will be enforced only once + // when the first byte of data arrives from the downstream. When used in + // conjunction with filters that emit dynamic metadata after decoding + // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to + // CONTINUOUS to enforce RBAC policies on every message boundary. 
+ EnforcementType enforcement_type = 4; +} diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index c62e815b699a..a3341b5ac606 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -7,15 +7,16 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - -import "envoy/annotations/deprecation.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3"; option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto b/api/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto index fb39f261f5ad..3d6f0ee234ab 100644 --- a/api/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto +++ b/api/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.network.sni_cluster.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3"; option java_outer_classname = "SniClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SNI Cluster Filter] // Set the upstream cluster name from the SNI field in the TLS connection. 
diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index 93c9c2cf7096..3d305cadcf40 100644 --- a/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -3,20 +3,20 @@ syntax = "proto3"; package envoy.extensions.filters.network.tcp_proxy.v3; import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/hash_policy.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3"; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto index 130156664942..4fc3289ae33d 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto @@ -6,13 +6,14 @@ import "envoy/config/ratelimit/v3/rls.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 1d4034a3a2aa..5ce18fd06233 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -7,13 +7,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto index 5308b0d5b436..74c71afb5424 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto @@ -7,13 +7,14 @@ import "envoy/extensions/filters/network/thrift_proxy/v3/route.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/network/wasm/v3/BUILD b/api/envoy/extensions/filters/network/wasm/v3/BUILD index 1c9ea8688661..31c49afb4c5c 100644 --- a/api/envoy/extensions/filters/network/wasm/v3/BUILD +++ b/api/envoy/extensions/filters/network/wasm/v3/BUILD @@ -6,8 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/network/wasm/v2:pkg", - "//envoy/extensions/wasm/v3:pkg", + "//envoy/config/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto index 06f9923ea5a6..ec13bc7bee48 100644 --- a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto +++ b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -2,23 +2,21 @@ syntax = "proto3"; package envoy.extensions.filters.network.wasm.v3; -import "envoy/extensions/wasm/v3/wasm.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "udpa/annotations/status.proto"; import 
"udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] // Wasm :ref:`configuration overview `. message Wasm { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.wasm.v2.Wasm"; - // General Plugin configuration. - envoy.extensions.wasm.v3.PluginConfig config = 1; + config.wasm.v3.PluginConfig config = 1; } diff --git a/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto index 863c43eeb698..a90f777d79ec 100644 --- a/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto +++ b/api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.zookeeper_proxy.v3; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3"; option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ZooKeeper proxy] // ZooKeeper Proxy :ref:`configuration overview `. 
diff --git a/api/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto b/api/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto index 8d808c30e5d9..fb7adf440288 100644 --- a/api/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto +++ b/api/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto @@ -4,11 +4,13 @@ package envoy.extensions.retry.host.omit_host_metadata.v3; import "envoy/config/core/v3/base.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3"; option java_outer_classname = "OmitHostMetadataConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Omit host metadata retry predicate] diff --git a/api/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto b/api/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto index 7d1edfbc73fc..b6a4bbecbae8 100644 --- a/api/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto +++ b/api/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.retry.priority.previous_priorities.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3"; option java_outer_classname = "PreviousPrioritiesConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Previous priorities retry selector] diff --git a/api/envoy/extensions/transport_sockets/alts/v3/alts.proto 
b/api/envoy/extensions/transport_sockets/alts/v3/alts.proto index f04869bc9245..6c001be1c746 100644 --- a/api/envoy/extensions/transport_sockets/alts/v3/alts.proto +++ b/api/envoy/extensions/transport_sockets/alts/v3/alts.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.alts.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.alts.v3"; option java_outer_classname = "AltsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ALTS] // [#extension: envoy.transport_sockets.alts] diff --git a/api/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto b/api/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto index 369e34c7b95a..85406c1f7713 100644 --- a/api/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto +++ b/api/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.raw_buffer.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3"; option java_outer_classname = "RawBufferProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Raw Buffer] // [#extension: envoy.transport_sockets.raw_buffer] diff --git a/api/envoy/extensions/transport_sockets/tap/v3/tap.proto b/api/envoy/extensions/transport_sockets/tap/v3/tap.proto index 36fd4dee4a89..ef61575f67f7 100644 --- a/api/envoy/extensions/transport_sockets/tap/v3/tap.proto +++ b/api/envoy/extensions/transport_sockets/tap/v3/tap.proto @@ -5,13 +5,14 @@ package envoy.extensions.transport_sockets.tap.v3; import "envoy/config/core/v3/base.proto"; 
import "envoy/extensions/common/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] diff --git a/api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD new file mode 100644 index 000000000000..76600e3dd208 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/common/tap/v4alpha:pkg", + "//envoy/extensions/transport_sockets/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto b/api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto new file mode 100644 index 000000000000..5e0efc403ab5 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/extensions/common/tap/v4alpha/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap] +// [#extension: envoy.transport_sockets.tap] + +// Configuration for tap transport socket. This wraps another transport socket, providing the +// ability to interpose and record in plain text any traffic that is surfaced to Envoy. +message Tap { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tap.v3.Tap"; + + // Common configuration for the tap transport socket. + common.tap.v4alpha.CommonExtensionConfig common_config = 1 + [(validate.rules).message = {required: true}]; + + // The underlying transport socket being wrapped. + config.core.v4alpha.TransportSocket transport_socket = 2 + [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/cert.proto b/api/envoy/extensions/transport_sockets/tls/v3/cert.proto index 4d43b0ac9962..ea4bc1475c47 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -12,13 +12,14 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common TLS configuration] @@ -429,7 +430,7 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.DownstreamTlsContext"; @@ -451,6 +452,16 @@ message DownstreamTlsContext { // Config for fetching TLS session ticket keys via SDS API. 
SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; } // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD new file mode 100644 index 000000000000..e56544584bfe --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto new file mode 100644 index 000000000000..febb6d665240 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto @@ -0,0 +1,518 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "CertProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. 
+ TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. 
code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider"; + + reserved 2; + + reserved "config"; + + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.TlsCertificate"; + + // The TLS certificate chain. + config.core.v4alpha.DataSource certificate_chain = 1; + + // The TLS private key. + config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. 
+ config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + config.core.v4alpha.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. 
To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v4alpha.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + reserved 4; + + reserved "verify_subject_alt_name"; + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. 
+ config.core.v4alpha.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. 
If this DataSource contains + // multiple CRLs, all of them will be used. + config.core.v4alpha.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." + "CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. 
+ CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. 
attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. 
+ // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + config.core.v4alpha.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/service/accesslog/v2/BUILD b/api/envoy/service/accesslog/v2/BUILD index b25083a6222f..dbaf26b180f4 100644 --- a/api/envoy/service/accesslog/v2/BUILD +++ b/api/envoy/service/accesslog/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/data/accesslog/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/service/accesslog/v2/als.proto b/api/envoy/service/accesslog/v2/als.proto index 69618be73011..bbd871ff83a4 100644 --- a/api/envoy/service/accesslog/v2/als.proto +++ b/api/envoy/service/accesslog/v2/als.proto @@ -5,12 +5,14 @@ package envoy.service.accesslog.v2; import "envoy/api/v2/core/base.proto"; import "envoy/data/accesslog/v2/accesslog.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.accesslog.v2"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/api/envoy/service/accesslog/v3/als.proto b/api/envoy/service/accesslog/v3/als.proto index 9749d1a9b96b..3f5e37325cc5 100644 --- a/api/envoy/service/accesslog/v3/als.proto +++ b/api/envoy/service/accesslog/v3/als.proto @@ -5,14 +5,15 @@ package envoy.service.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/accesslog/v3/accesslog.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.accesslog.v3"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option 
java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/api/envoy/service/auth/v2/BUILD b/api/envoy/service/auth/v2/BUILD index 80ff3abc0c3b..b23b27a8aac9 100644 --- a/api/envoy/service/auth/v2/BUILD +++ b/api/envoy/service/auth/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index d918c74440a6..16ac3ee23d49 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -7,9 +7,12 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Attribute Context ] diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index 8245106334b7..0f580fe7dc34 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -8,12 +8,14 @@ import "envoy/type/http_status.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Authorization Service ] diff --git a/api/envoy/service/auth/v3/attribute_context.proto b/api/envoy/service/auth/v3/attribute_context.proto 
index 5e4e63933bfd..3c4fe0af665e 100644 --- a/api/envoy/service/auth/v3/attribute_context.proto +++ b/api/envoy/service/auth/v3/attribute_context.proto @@ -7,11 +7,13 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Attribute Context ] diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index d77ae9e03607..b93b61a3bde9 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -8,14 +8,15 @@ import "envoy/type/v3/http_status.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Authorization Service ] diff --git a/api/envoy/service/cluster/v3/cds.proto b/api/envoy/service/cluster/v3/cds.proto index c0b9c4ace7ce..100ecad39a96 100644 --- a/api/envoy/service/cluster/v3/cds.proto +++ b/api/envoy/service/cluster/v3/cds.proto @@ -6,14 +6,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.cluster.v3"; option java_outer_classname = "CdsProto"; option 
java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: CDS] diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto index 01759e5f1b36..d70e0cdc8e14 100644 --- a/api/envoy/service/discovery/v2/ads.proto +++ b/api/envoy/service/discovery/v2/ads.proto @@ -4,10 +4,13 @@ package envoy.service.discovery.v2; import "envoy/api/v2/discovery.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Aggregated Discovery Service (ADS)] diff --git a/api/envoy/service/discovery/v2/hds.proto b/api/envoy/service/discovery/v2/hds.proto index a0211685d28a..76f91c5a456d 100644 --- a/api/envoy/service/discovery/v2/hds.proto +++ b/api/envoy/service/discovery/v2/hds.proto @@ -10,12 +10,14 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "HdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.health.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health Discovery Service (HDS)] diff --git a/api/envoy/service/discovery/v2/rtds.proto b/api/envoy/service/discovery/v2/rtds.proto index e12ceab635bb..713ac277072b 100644 --- a/api/envoy/service/discovery/v2/rtds.proto +++ b/api/envoy/service/discovery/v2/rtds.proto @@ -9,6 +9,7 @@ import "google/protobuf/struct.proto"; import "envoy/annotations/resource.proto"; import 
"udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; @@ -16,6 +17,7 @@ option java_outer_classname = "RtdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.runtime.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` diff --git a/api/envoy/service/discovery/v2/sds.proto b/api/envoy/service/discovery/v2/sds.proto index 6a131ad413bd..4d01d475c59b 100644 --- a/api/envoy/service/discovery/v2/sds.proto +++ b/api/envoy/service/discovery/v2/sds.proto @@ -8,12 +8,14 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "SdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.secret.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Secret Discovery Service (SDS)] diff --git a/api/envoy/service/discovery/v3/ads.proto b/api/envoy/service/discovery/v3/ads.proto index beaae93b91fc..03021559ab66 100644 --- a/api/envoy/service/discovery/v3/ads.proto +++ b/api/envoy/service/discovery/v3/ads.proto @@ -4,12 +4,14 @@ package envoy.service.discovery.v3; import "envoy/service/discovery/v3/discovery.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v3"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; +option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Aggregated Discovery Service (ADS)] diff --git a/api/envoy/service/discovery/v3/discovery.proto b/api/envoy/service/discovery/v3/discovery.proto index 02997a51dca2..b8e31160a88b 100644 --- a/api/envoy/service/discovery/v3/discovery.proto +++ b/api/envoy/service/discovery/v3/discovery.proto @@ -7,11 +7,13 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v3"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common discovery API components] diff --git a/api/envoy/service/endpoint/v3/eds.proto b/api/envoy/service/endpoint/v3/eds.proto index ab2ec3271828..e1a8494afc8f 100644 --- a/api/envoy/service/endpoint/v3/eds.proto +++ b/api/envoy/service/endpoint/v3/eds.proto @@ -8,15 +8,16 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/config/filter/http/wasm/v2/BUILD b/api/envoy/service/event_reporting/v2alpha/BUILD similarity index 81% rename from generated_api_shadow/envoy/config/filter/http/wasm/v2/BUILD rename to 
api/envoy/service/event_reporting/v2alpha/BUILD index 7903b3becced..6db6b085b4e4 100644 --- a/generated_api_shadow/envoy/config/filter/http/wasm/v2/BUILD +++ b/api/envoy/service/event_reporting/v2alpha/BUILD @@ -5,8 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( + has_services = True, deps = [ - "//envoy/config/wasm/v2:pkg", + "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/service/event_reporting/v2alpha/event_reporting_service.proto b/api/envoy/service/event_reporting/v2alpha/event_reporting_service.proto new file mode 100644 index 000000000000..8d07f04640ca --- /dev/null +++ b/api/envoy/service/event_reporting/v2alpha/event_reporting_service.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package envoy.service.event_reporting.v2alpha; + +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.event_reporting.v2alpha"; +option java_outer_classname = "EventReportingServiceProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.event_reporting.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: gRPC Event Reporting Service] + +// [#not-implemented-hide:] +// Service for streaming different types of events from Envoy to a server. The examples of +// such events may be health check or outlier detection events. +service EventReportingService { + // Envoy will connect and send StreamEventsRequest messages forever. + // The management server may send StreamEventsResponse to configure event stream. See below. + // This API is designed for high throughput with the expectation that it might be lossy. 
+ rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { + } +} + +// [#not-implemented-hide:] +// An events envoy sends to the management server. +message StreamEventsRequest { + message Identifier { + // The node sending the event messages over the stream. + api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; + } + + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + + // Batch of events. When the stream is already active, it will be the events occurred + // since the last message had been sent. If the server receives unknown event type, it should + // silently ignore it. + // + // The following events are supported: + // + // * :ref:`HealthCheckEvent ` + // * :ref:`OutlierDetectionEvent ` + repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// [#not-implemented-hide:] +// The management server may send envoy a StreamEventsResponse to tell which events the server +// is interested in. In future, with aggregated event reporting service, this message will +// contain, for example, clusters the envoy should send events for, or event types the server +// wants to process. +message StreamEventsResponse { +} diff --git a/api/envoy/service/event_reporting/v3/BUILD b/api/envoy/service/event_reporting/v3/BUILD new file mode 100644 index 000000000000..99d01d89f712 --- /dev/null +++ b/api/envoy/service/event_reporting/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/service/event_reporting/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/event_reporting/v3/event_reporting_service.proto b/api/envoy/service/event_reporting/v3/event_reporting_service.proto new file mode 100644 index 000000000000..6f0b325902fb --- /dev/null +++ b/api/envoy/service/event_reporting/v3/event_reporting_service.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; + +package envoy.service.event_reporting.v3; + +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.event_reporting.v3"; +option java_outer_classname = "EventReportingServiceProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: gRPC Event Reporting Service] + +// [#not-implemented-hide:] +// Service for streaming different types of events from Envoy to a server. The examples of +// such events may be health check or outlier detection events. +service EventReportingService { + // Envoy will connect and send StreamEventsRequest messages forever. + // The management server may send StreamEventsResponse to configure event stream. See below. + // This API is designed for high throughput with the expectation that it might be lossy. + rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { + } +} + +// [#not-implemented-hide:] +// An events envoy sends to the management server. 
+message StreamEventsRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.event_reporting.v2alpha.StreamEventsRequest"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier"; + + // The node sending the event messages over the stream. + config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; + } + + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + + // Batch of events. When the stream is already active, it will be the events occurred + // since the last message had been sent. If the server receives unknown event type, it should + // silently ignore it. + // + // The following events are supported: + // + // * :ref:`HealthCheckEvent ` + // * :ref:`OutlierDetectionEvent ` + repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// [#not-implemented-hide:] +// The management server may send envoy a StreamEventsResponse to tell which events the server +// is interested in. In future, with aggregated event reporting service, this message will +// contain, for example, clusters the envoy should send events for, or event types the server +// wants to process. 
+message StreamEventsResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.event_reporting.v2alpha.StreamEventsResponse"; +} diff --git a/api/envoy/service/health/v3/hds.proto b/api/envoy/service/health/v3/hds.proto index af126ced495c..0b09134709c8 100644 --- a/api/envoy/service/health/v3/hds.proto +++ b/api/envoy/service/health/v3/hds.proto @@ -9,12 +9,14 @@ import "envoy/config/endpoint/v3/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.health.v3"; option java_outer_classname = "HdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health Discovery Service (HDS)] diff --git a/api/envoy/service/listener/v3/lds.proto b/api/envoy/service/listener/v3/lds.proto index 0a1b6b23564d..a7a8260619f7 100644 --- a/api/envoy/service/listener/v3/lds.proto +++ b/api/envoy/service/listener/v3/lds.proto @@ -8,15 +8,16 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.listener.v3"; option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener] // Listener :ref:`configuration overview ` diff --git a/api/envoy/service/load_stats/v2/BUILD b/api/envoy/service/load_stats/v2/BUILD index e58fe9bd9a3f..504602b339ac 100644 --- 
a/api/envoy/service/load_stats/v2/BUILD +++ b/api/envoy/service/load_stats/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/endpoint:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto index a82d703de8c3..a71039e7ceeb 100644 --- a/api/envoy/service/load_stats/v2/lrs.proto +++ b/api/envoy/service/load_stats/v2/lrs.proto @@ -7,12 +7,14 @@ import "envoy/api/v2/endpoint/load_report.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Load reporting service] diff --git a/api/envoy/service/load_stats/v3/lrs.proto b/api/envoy/service/load_stats/v3/lrs.proto index 370a8a5925b9..ce48574826a9 100644 --- a/api/envoy/service/load_stats/v3/lrs.proto +++ b/api/envoy/service/load_stats/v3/lrs.proto @@ -7,14 +7,15 @@ import "envoy/config/endpoint/v3/load_report.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.load_stats.v3"; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Load reporting service] diff --git a/api/envoy/service/metrics/v2/BUILD b/api/envoy/service/metrics/v2/BUILD index be8920efa9a0..824992f46200 100644 --- a/api/envoy/service/metrics/v2/BUILD +++ b/api/envoy/service/metrics/v2/BUILD @@ -8,6 +8,7 @@ api_proto_package( has_services = True, deps = [ 
"//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@prometheus_metrics_model//:client_model", ], ) diff --git a/api/envoy/service/metrics/v2/metrics_service.proto b/api/envoy/service/metrics/v2/metrics_service.proto index 5c9a039ccd08..aa5e70385015 100644 --- a/api/envoy/service/metrics/v2/metrics_service.proto +++ b/api/envoy/service/metrics/v2/metrics_service.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/base.proto"; import "metrics.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v2"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics service] diff --git a/api/envoy/service/metrics/v3/metrics_service.proto b/api/envoy/service/metrics/v3/metrics_service.proto index 16b4279ef393..033c168c32ba 100644 --- a/api/envoy/service/metrics/v3/metrics_service.proto +++ b/api/envoy/service/metrics/v3/metrics_service.proto @@ -5,14 +5,16 @@ package envoy.service.metrics.v3; import "envoy/config/core/v3/base.proto"; import "metrics.proto"; -import "udpa/annotations/versioning.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v3"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics service] diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto index 5d9b35e0c9ff..6d97718b4b32 100644 --- a/api/envoy/service/ratelimit/v2/rls.proto +++ b/api/envoy/service/ratelimit/v2/rls.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/base.proto"; import 
"envoy/api/v2/ratelimit/ratelimit.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate Limit Service (RLS)] @@ -75,6 +77,9 @@ message RateLimitResponse { DAY = 4; } + // A name or description of this limit. + string name = 3; + // The number of requests per unit of time. uint32 requests_per_unit = 1; diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto index 7d9fd93ba83b..4aad42fcaa81 100644 --- a/api/envoy/service/ratelimit/v3/rls.proto +++ b/api/envoy/service/ratelimit/v3/rls.proto @@ -5,14 +5,15 @@ package envoy.service.ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.ratelimit.v3"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate Limit Service (RLS)] @@ -85,6 +86,9 @@ message RateLimitResponse { DAY = 4; } + // A name or description of this limit. + string name = 3; + // The number of requests per unit of time. 
uint32 requests_per_unit = 1; diff --git a/api/envoy/service/route/v3/rds.proto b/api/envoy/service/route/v3/rds.proto index 3514ebd2a2c7..3a2c432fd8b2 100644 --- a/api/envoy/service/route/v3/rds.proto +++ b/api/envoy/service/route/v3/rds.proto @@ -7,15 +7,16 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.route.v3"; option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RDS] diff --git a/api/envoy/service/route/v3/srds.proto b/api/envoy/service/route/v3/srds.proto index db8ecbe4eb18..7a7f8f7d3a3f 100644 --- a/api/envoy/service/route/v3/srds.proto +++ b/api/envoy/service/route/v3/srds.proto @@ -6,14 +6,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.route.v3"; option java_outer_classname = "SrdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SRDS] // * Routing :ref:`architecture overview ` diff --git a/api/envoy/service/runtime/v3/rtds.proto b/api/envoy/service/runtime/v3/rtds.proto index 69c77f2a4937..b12844233883 100644 --- a/api/envoy/service/runtime/v3/rtds.proto +++ b/api/envoy/service/runtime/v3/rtds.proto @@ -7,15 +7,16 @@ import 
"envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.runtime.v3"; option java_outer_classname = "RtdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` diff --git a/api/envoy/service/secret/v3/sds.proto b/api/envoy/service/secret/v3/sds.proto index e541ca9882b3..3c9441d7c760 100644 --- a/api/envoy/service/secret/v3/sds.proto +++ b/api/envoy/service/secret/v3/sds.proto @@ -6,14 +6,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.secret.v3"; option java_outer_classname = "SdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Secret Discovery Service (SDS)] diff --git a/api/envoy/service/status/v2/BUILD b/api/envoy/service/status/v2/BUILD index c3d204fd52a1..6e2c33fd2827 100644 --- a/api/envoy/service/status/v2/BUILD +++ b/api/envoy/service/status/v2/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/admin/v2alpha:pkg", "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/service/status/v2/csds.proto b/api/envoy/service/status/v2/csds.proto index 764c95b01b6a..2233f3cef771 100644 
--- a/api/envoy/service/status/v2/csds.proto +++ b/api/envoy/service/status/v2/csds.proto @@ -9,10 +9,13 @@ import "envoy/type/matcher/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.service.status.v2"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Client Status Discovery Service (CSDS)] diff --git a/api/envoy/service/status/v3/csds.proto b/api/envoy/service/status/v3/csds.proto index 72832b4ad4b7..3347def21d8f 100644 --- a/api/envoy/service/status/v3/csds.proto +++ b/api/envoy/service/status/v3/csds.proto @@ -9,12 +9,14 @@ import "envoy/type/matcher/v3/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.status.v3"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client Status Discovery Service (CSDS)] diff --git a/api/envoy/service/tap/v2alpha/common.proto b/api/envoy/service/tap/v2alpha/common.proto index f29400504bbd..990a3826481b 100644 --- a/api/envoy/service/tap/v2alpha/common.proto +++ b/api/envoy/service/tap/v2alpha/common.proto @@ -9,12 +9,14 @@ import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option 
(udpa.annotations.file_migrate).move_to_package = "envoy.config.tap.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common tap configuration] diff --git a/api/envoy/service/tap/v2alpha/tap.proto b/api/envoy/service/tap/v2alpha/tap.proto index c0d25a1b57e5..9fd18eae5d36 100644 --- a/api/envoy/service/tap/v2alpha/tap.proto +++ b/api/envoy/service/tap/v2alpha/tap.proto @@ -5,12 +5,14 @@ package envoy.service.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/data/tap/v2alpha/wrapper.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap Sink Service] diff --git a/api/envoy/service/tap/v2alpha/tapds.proto b/api/envoy/service/tap/v2alpha/tapds.proto index 6ef1288d1319..81b9cb0e447b 100644 --- a/api/envoy/service/tap/v2alpha/tapds.proto +++ b/api/envoy/service/tap/v2alpha/tapds.proto @@ -7,12 +7,14 @@ import "envoy/service/tap/v2alpha/common.proto"; import "google/api/annotations.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "TapdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap discovery service] diff --git a/api/envoy/service/tap/v3/tap.proto b/api/envoy/service/tap/v3/tap.proto index bf269e388024..080aba215c10 100644 --- a/api/envoy/service/tap/v3/tap.proto +++ b/api/envoy/service/tap/v3/tap.proto @@ -5,14 +5,15 @@ package envoy.service.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/tap/v3/wrapper.proto"; +import 
"udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap Sink Service] diff --git a/api/envoy/service/tap/v3/tapds.proto b/api/envoy/service/tap/v3/tapds.proto index 80e550e9b6af..51393d6e14c7 100644 --- a/api/envoy/service/tap/v3/tapds.proto +++ b/api/envoy/service/tap/v3/tapds.proto @@ -7,14 +7,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v3"; option java_outer_classname = "TapdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap discovery service] diff --git a/api/envoy/service/trace/v2/BUILD b/api/envoy/service/trace/v2/BUILD index 6fce6d2d917a..dec3717aa573 100644 --- a/api/envoy/service/trace/v2/BUILD +++ b/api/envoy/service/trace/v2/BUILD @@ -8,6 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) diff --git a/api/envoy/service/trace/v2/trace_service.proto b/api/envoy/service/trace/v2/trace_service.proto index 81449dab8675..48e65820b387 100644 --- a/api/envoy/service/trace/v2/trace_service.proto +++ b/api/envoy/service/trace/v2/trace_service.proto @@ -8,12 +8,14 @@ import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.service.trace.v2"; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Trace service] diff --git a/api/envoy/service/trace/v3/trace_service.proto b/api/envoy/service/trace/v3/trace_service.proto index b33d3af75398..facaa9211c92 100644 --- a/api/envoy/service/trace/v3/trace_service.proto +++ b/api/envoy/service/trace/v3/trace_service.proto @@ -7,14 +7,16 @@ import "envoy/config/core/v3/base.proto"; import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; -import "udpa/annotations/versioning.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.trace.v3"; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Trace service] diff --git a/api/envoy/type/BUILD b/api/envoy/type/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/api/envoy/type/BUILD +++ b/api/envoy/type/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/type/hash_policy.proto b/api/envoy/type/hash_policy.proto index 1e13e60b286b..b6aeb31fcbfd 100644 --- a/api/envoy/type/hash_policy.proto +++ b/api/envoy/type/hash_policy.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HashPolicyProto"; option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Hash Policy] diff --git a/api/envoy/type/http.proto b/api/envoy/type/http.proto index 12160c6354a9..c1c787411fad 100644 --- a/api/envoy/type/http.proto +++ b/api/envoy/type/http.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP] diff --git a/api/envoy/type/http_status.proto b/api/envoy/type/http_status.proto index e81c4f9d11dc..99b44a98c251 100644 --- a/api/envoy/type/http_status.proto +++ b/api/envoy/type/http_status.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HttpStatusProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP status codes] diff --git a/api/envoy/type/matcher/BUILD b/api/envoy/type/matcher/BUILD index 1f0bfe3335d0..e2a45aba90ec 100644 --- a/api/envoy/type/matcher/BUILD +++ b/api/envoy/type/matcher/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto index 43dd5b7ad139..2cbc602564c5 100644 --- a/api/envoy/type/matcher/metadata.proto +++ b/api/envoy/type/matcher/metadata.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/matcher/value.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "MetadataProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metadata matcher] diff --git a/api/envoy/type/matcher/node.proto b/api/envoy/type/matcher/node.proto index 937aeba63086..c9e84a46279a 100644 --- a/api/envoy/type/matcher/node.proto +++ b/api/envoy/type/matcher/node.proto @@ -5,9 +5,12 @@ package envoy.type.matcher; import "envoy/type/matcher/string.proto"; import "envoy/type/matcher/struct.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Node matcher] diff --git a/api/envoy/type/matcher/number.proto b/api/envoy/type/matcher/number.proto index 52a6eb6e15ce..e488f16a4a0c 100644 --- a/api/envoy/type/matcher/number.proto +++ b/api/envoy/type/matcher/number.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/range.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Number matcher] diff --git a/api/envoy/type/matcher/path.proto b/api/envoy/type/matcher/path.proto index 779339a2d260..860a1c69f18a 100644 --- a/api/envoy/type/matcher/path.proto +++ b/api/envoy/type/matcher/path.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/matcher/string.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "PathProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Path matcher] diff --git 
a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index 2be13845fc00..78b4a2c1d61e 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Regex matcher] diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 2cbfc2476492..431043e00ec1 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -5,11 +5,13 @@ package envoy.type.matcher; import "envoy/type/matcher/regex.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "StringProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: String matcher] diff --git a/api/envoy/type/matcher/struct.proto b/api/envoy/type/matcher/struct.proto index 245d839b21e3..f65b1d121845 100644 --- a/api/envoy/type/matcher/struct.proto +++ b/api/envoy/type/matcher/struct.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/matcher/value.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "StructProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Struct matcher] diff --git a/api/envoy/type/matcher/v3/metadata.proto b/api/envoy/type/matcher/v3/metadata.proto index 
94b27a0ba835..65ec4f47ffff 100644 --- a/api/envoy/type/matcher/v3/metadata.proto +++ b/api/envoy/type/matcher/v3/metadata.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/value.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metadata matcher] diff --git a/api/envoy/type/matcher/v3/node.proto b/api/envoy/type/matcher/v3/node.proto index 602ae2e70650..fe507312135f 100644 --- a/api/envoy/type/matcher/v3/node.proto +++ b/api/envoy/type/matcher/v3/node.proto @@ -5,11 +5,13 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/matcher/v3/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Node matcher] diff --git a/api/envoy/type/matcher/v3/number.proto b/api/envoy/type/matcher/v3/number.proto index c5b722dc57ea..2379efdcbd23 100644 --- a/api/envoy/type/matcher/v3/number.proto +++ b/api/envoy/type/matcher/v3/number.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/v3/range.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Number matcher] diff --git 
a/api/envoy/type/matcher/v3/path.proto b/api/envoy/type/matcher/v3/path.proto index 68e0bee83c6e..0ce89871c9d9 100644 --- a/api/envoy/type/matcher/v3/path.proto +++ b/api/envoy/type/matcher/v3/path.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/string.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "PathProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Path matcher] diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto index acfb905ea01c..393274794abf 100644 --- a/api/envoy/type/matcher/v3/regex.proto +++ b/api/envoy/type/matcher/v3/regex.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Regex matcher] diff --git a/api/envoy/type/matcher/v3/string.proto b/api/envoy/type/matcher/v3/string.proto index b67b6b22acc4..77fe48ac74cf 100644 --- a/api/envoy/type/matcher/v3/string.proto +++ b/api/envoy/type/matcher/v3/string.proto @@ -4,14 +4,15 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/regex.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "StringProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: String matcher] diff --git a/api/envoy/type/matcher/v3/struct.proto b/api/envoy/type/matcher/v3/struct.proto index 97e214d79e6a..b88d7b11bc2a 100644 --- a/api/envoy/type/matcher/v3/struct.proto +++ b/api/envoy/type/matcher/v3/struct.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/value.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "StructProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Struct matcher] diff --git a/api/envoy/type/matcher/v3/value.proto b/api/envoy/type/matcher/v3/value.proto index 6ad8750c5fa0..040332273ba3 100644 --- a/api/envoy/type/matcher/v3/value.proto +++ b/api/envoy/type/matcher/v3/value.proto @@ -5,13 +5,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/number.proto"; import "envoy/type/matcher/v3/string.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Value matcher] diff --git a/api/envoy/type/matcher/value.proto b/api/envoy/type/matcher/value.proto index dda49958904f..aaecd14e8ecd 100644 --- a/api/envoy/type/matcher/value.proto +++ b/api/envoy/type/matcher/value.proto @@ -5,11 +5,13 @@ package envoy.type.matcher; import "envoy/type/matcher/number.proto"; import "envoy/type/matcher/string.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Value matcher] diff --git a/api/envoy/type/metadata/v2/metadata.proto b/api/envoy/type/metadata/v2/metadata.proto index 67653519ba97..43a1a7ca9275 100644 --- a/api/envoy/type/metadata/v2/metadata.proto +++ b/api/envoy/type/metadata/v2/metadata.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.type.metadata.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.metadata.v2"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.type.metadata.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metadata] diff --git a/api/envoy/type/metadata/v3/metadata.proto b/api/envoy/type/metadata/v3/metadata.proto index f8a98d0b5805..ddcce6882057 100644 --- a/api/envoy/type/metadata/v3/metadata.proto +++ b/api/envoy/type/metadata/v3/metadata.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.metadata.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.metadata.v3"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metadata] diff --git a/api/envoy/type/percent.proto b/api/envoy/type/percent.proto index 3420342dee2f..fc41a26662fe 100644 --- a/api/envoy/type/percent.proto +++ b/api/envoy/type/percent.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.type"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Percent] diff --git a/api/envoy/type/range.proto b/api/envoy/type/range.proto index e550ca19bfc3..79aaa81975c3 100644 --- a/api/envoy/type/range.proto +++ b/api/envoy/type/range.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Range] diff --git a/api/envoy/type/semantic_version.proto b/api/envoy/type/semantic_version.proto index a7dbf7ebd6ef..80fe016bfa16 100644 --- a/api/envoy/type/semantic_version.proto +++ b/api/envoy/type/semantic_version.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "SemanticVersionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Semantic Version] diff --git a/api/envoy/type/token_bucket.proto b/api/envoy/type/token_bucket.proto index b293b76be192..41b6d268d5f6 100644 --- a/api/envoy/type/token_bucket.proto +++ b/api/envoy/type/token_bucket.proto @@ -5,11 +5,13 @@ package envoy.type; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "TokenBucketProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Token bucket] diff --git a/api/envoy/type/tracing/v2/BUILD 
b/api/envoy/type/tracing/v2/BUILD index 7088ddfe0dad..34e1b604ce9f 100644 --- a/api/envoy/type/tracing/v2/BUILD +++ b/api/envoy/type/tracing/v2/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/type/metadata/v2:pkg"], + deps = [ + "//envoy/type/metadata/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/type/tracing/v2/custom_tag.proto b/api/envoy/type/tracing/v2/custom_tag.proto index 750c07f79943..7506ae886125 100644 --- a/api/envoy/type/tracing/v2/custom_tag.proto +++ b/api/envoy/type/tracing/v2/custom_tag.proto @@ -4,11 +4,13 @@ package envoy.type.tracing.v2; import "envoy/type/metadata/v2/metadata.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.tracing.v2"; option java_outer_classname = "CustomTagProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Custom Tag] diff --git a/api/envoy/type/tracing/v3/custom_tag.proto b/api/envoy/type/tracing/v3/custom_tag.proto index 9b8d6029e127..42518ead59d1 100644 --- a/api/envoy/type/tracing/v3/custom_tag.proto +++ b/api/envoy/type/tracing/v3/custom_tag.proto @@ -4,13 +4,14 @@ package envoy.type.tracing.v3; import "envoy/type/metadata/v3/metadata.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.tracing.v3"; option java_outer_classname = "CustomTagProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Custom Tag] diff --git a/api/envoy/type/v3/hash_policy.proto b/api/envoy/type/v3/hash_policy.proto index 2a27306b1171..96c39299698f 100644 --- a/api/envoy/type/v3/hash_policy.proto +++ 
b/api/envoy/type/v3/hash_policy.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HashPolicyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Hash Policy] diff --git a/api/envoy/type/v3/http.proto b/api/envoy/type/v3/http.proto index 2018b8a1e76b..fec15d11f871 100644 --- a/api/envoy/type/v3/http.proto +++ b/api/envoy/type/v3/http.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP] diff --git a/api/envoy/type/v3/http_status.proto b/api/envoy/type/v3/http_status.proto index ca990b17c75b..8914b7a0264a 100644 --- a/api/envoy/type/v3/http_status.proto +++ b/api/envoy/type/v3/http_status.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HttpStatusProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP status codes] diff --git a/api/envoy/type/v3/percent.proto b/api/envoy/type/v3/percent.proto index 80439bc606b2..3a89a3f44fd5 100644 --- a/api/envoy/type/v3/percent.proto +++ b/api/envoy/type/v3/percent.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; 
option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Percent] diff --git a/api/envoy/type/v3/range.proto b/api/envoy/type/v3/range.proto index c0e8348768dc..de1d55b09a21 100644 --- a/api/envoy/type/v3/range.proto +++ b/api/envoy/type/v3/range.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Range] diff --git a/api/envoy/type/v3/semantic_version.proto b/api/envoy/type/v3/semantic_version.proto index 38f3484ae58b..a4126336f03a 100644 --- a/api/envoy/type/v3/semantic_version.proto +++ b/api/envoy/type/v3/semantic_version.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "SemanticVersionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Semantic Version] diff --git a/api/envoy/type/v3/token_bucket.proto b/api/envoy/type/v3/token_bucket.proto index 34296f3ae37f..a96d50fbd0ab 100644 --- a/api/envoy/type/v3/token_bucket.proto +++ b/api/envoy/type/v3/token_bucket.proto @@ -5,13 +5,14 @@ package envoy.type.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "TokenBucketProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Token bucket] diff --git a/api/tools/BUILD b/api/tools/BUILD index d94214d7bacd..8d2207b94070 100644 --- a/api/tools/BUILD +++ b/api/tools/BUILD @@ -4,7 +4,6 @@ py_binary( name = "tap2pcap", srcs = ["tap2pcap.py"], licenses = ["notice"], # Apache 2 - python_version = "PY2", visibility = ["//visibility:public"], deps = ["//envoy/data/tap/v2alpha:pkg_py_proto"], ) @@ -16,7 +15,6 @@ py_test( "data/tap2pcap_h2_ipv4.pb_text", "data/tap2pcap_h2_ipv4.txt", ], - python_version = "PY2", # Don't run this by default, since we don't want to force local dependency on Wireshark/tshark, # will explicitly invoke in CI. tags = ["manual"], diff --git a/api/tools/tap2pcap.py b/api/tools/tap2pcap.py index db65327210ac..2e9057940556 100644 --- a/api/tools/tap2pcap.py +++ b/api/tools/tap2pcap.py @@ -20,8 +20,8 @@ from __future__ import print_function import datetime +import io import socket -import StringIO import subprocess as sp import sys import time @@ -32,14 +32,14 @@ def DumpEvent(direction, timestamp, data): - dump = StringIO.StringIO() + dump = io.StringIO() dump.write('%s\n' % direction) # Adjust to local timezone adjusted_dt = timestamp.ToDatetime() - datetime.timedelta(seconds=time.altzone) dump.write('%s\n' % adjusted_dt) od = sp.Popen(['od', '-Ax', '-tx1', '-v'], stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.PIPE) packet_dump = od.communicate(data)[0] - dump.write(packet_dump) + dump.write(packet_dump.decode()) return dump.getvalue() @@ -78,7 +78,7 @@ def Tap2Pcap(tap_path, pcap_path): '%d,%d' % (remote_port, local_port), '-', pcap_path ] text2pcap = sp.Popen(text2pcap_args, stdout=sp.PIPE, stdin=sp.PIPE) - text2pcap.communicate('\n'.join(dumps)) + text2pcap.communicate('\n'.join(dumps).encode()) if __name__ == '__main__': diff --git a/api/tools/tap2pcap_test.py b/api/tools/tap2pcap_test.py index 9f504cf660a6..429bbbbf1a8e 100644 --- 
a/api/tools/tap2pcap_test.py +++ b/api/tools/tap2pcap_test.py @@ -18,7 +18,7 @@ tap2pcap.Tap2Pcap(tap_path, pcap_path) actual_output = sp.check_output(['tshark', '-r', pcap_path, '-d', 'tcp.port==10000,http2', '-P']) - with open(expected_path, 'r') as f: + with open(expected_path, 'rb') as f: expected_output = f.read() if actual_output != expected_output: print('Mismatch') diff --git a/api/versioning/BUILD b/api/versioning/BUILD new file mode 100644 index 000000000000..981f46f23cf3 --- /dev/null +++ b/api/versioning/BUILD @@ -0,0 +1,249 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. + +licenses(["notice"]) # Apache 2 + +load("@rules_proto//proto:defs.bzl", "proto_library") + +# This tracks active development versions of protos. +proto_library( + name = "active_protos", + visibility = ["//visibility:public"], + deps = [ + "//envoy/admin/v3:pkg", + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/bootstrap/v3:pkg", + "//envoy/config/cluster/v3:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/config/filter/thrift/router/v2alpha1:pkg", + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", + "//envoy/config/grpc_credential/v3:pkg", + "//envoy/config/health_checker/redis/v2:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/config/overload/v3:pkg", + "//envoy/config/ratelimit/v3:pkg", + "//envoy/config/rbac/v3:pkg", + "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", + "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", + "//envoy/config/retry/omit_canary_hosts/v2:pkg", + "//envoy/config/retry/previous_hosts/v2:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/config/tap/v3:pkg", + "//envoy/config/trace/v3:pkg", + "//envoy/config/wasm/v3:pkg", + "//envoy/data/accesslog/v3:pkg", + "//envoy/data/cluster/v3:pkg", + "//envoy/data/core/v3:pkg", + "//envoy/data/dns/v3:pkg", + "//envoy/data/tap/v3:pkg", + 
"//envoy/extensions/access_loggers/file/v3:pkg", + "//envoy/extensions/access_loggers/grpc/v3:pkg", + "//envoy/extensions/access_loggers/wasm/v3:pkg", + "//envoy/extensions/clusters/aggregate/v3:pkg", + "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", + "//envoy/extensions/clusters/redis/v3:pkg", + "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", + "//envoy/extensions/common/ratelimit/v3:pkg", + "//envoy/extensions/common/tap/v3:pkg", + "//envoy/extensions/filter/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/common/fault/v3:pkg", + "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/aws_lambda/v3:pkg", + "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", + "//envoy/extensions/filters/http/buffer/v3:pkg", + "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/extensions/filters/http/compressor/v3:pkg", + "//envoy/extensions/filters/http/cors/v3:pkg", + "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", + "//envoy/extensions/filters/http/dynamo/v3:pkg", + "//envoy/extensions/filters/http/ext_authz/v3:pkg", + "//envoy/extensions/filters/http/fault/v3:pkg", + "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", + "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", + "//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg", + "//envoy/extensions/filters/http/grpc_stats/v3:pkg", + "//envoy/extensions/filters/http/grpc_web/v3:pkg", + "//envoy/extensions/filters/http/gzip/v3:pkg", + "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", + "//envoy/extensions/filters/http/health_check/v3:pkg", + "//envoy/extensions/filters/http/ip_tagging/v3:pkg", + "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "//envoy/extensions/filters/http/lua/v3:pkg", + "//envoy/extensions/filters/http/on_demand/v3:pkg", + "//envoy/extensions/filters/http/original_src/v3:pkg", + 
"//envoy/extensions/filters/http/ratelimit/v3:pkg", + "//envoy/extensions/filters/http/rbac/v3:pkg", + "//envoy/extensions/filters/http/router/v3:pkg", + "//envoy/extensions/filters/http/squash/v3:pkg", + "//envoy/extensions/filters/http/tap/v3:pkg", + "//envoy/extensions/filters/http/wasm/v3:pkg", + "//envoy/extensions/filters/listener/http_inspector/v3:pkg", + "//envoy/extensions/filters/listener/original_dst/v3:pkg", + "//envoy/extensions/filters/listener/original_src/v3:pkg", + "//envoy/extensions/filters/listener/proxy_protocol/v3:pkg", + "//envoy/extensions/filters/listener/tls_inspector/v3:pkg", + "//envoy/extensions/filters/network/client_ssl_auth/v3:pkg", + "//envoy/extensions/filters/network/direct_response/v3:pkg", + "//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg", + "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", + "//envoy/extensions/filters/network/echo/v3:pkg", + "//envoy/extensions/filters/network/ext_authz/v3:pkg", + "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", + "//envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", + "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", + "//envoy/extensions/filters/network/mysql_proxy/v3:pkg", + "//envoy/extensions/filters/network/ratelimit/v3:pkg", + "//envoy/extensions/filters/network/rbac/v3:pkg", + "//envoy/extensions/filters/network/redis_proxy/v3:pkg", + "//envoy/extensions/filters/network/sni_cluster/v3:pkg", + "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", + "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", + "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "//envoy/extensions/filters/network/wasm/v3:pkg", + "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", + "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", + "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", + "//envoy/extensions/transport_sockets/alts/v3:pkg", 
+ "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", + "//envoy/extensions/transport_sockets/tap/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/service/accesslog/v3:pkg", + "//envoy/service/auth/v3:pkg", + "//envoy/service/cluster/v3:pkg", + "//envoy/service/discovery/v3:pkg", + "//envoy/service/endpoint/v3:pkg", + "//envoy/service/event_reporting/v3:pkg", + "//envoy/service/health/v3:pkg", + "//envoy/service/listener/v3:pkg", + "//envoy/service/load_stats/v3:pkg", + "//envoy/service/metrics/v3:pkg", + "//envoy/service/ratelimit/v3:pkg", + "//envoy/service/route/v3:pkg", + "//envoy/service/runtime/v3:pkg", + "//envoy/service/secret/v3:pkg", + "//envoy/service/status/v3:pkg", + "//envoy/service/tap/v3:pkg", + "//envoy/service/trace/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/metadata/v3:pkg", + "//envoy/type/tracing/v3:pkg", + "//envoy/type/v3:pkg", + ], +) + +# This tracks frozen versions of protos. +proto_library( + name = "frozen_protos", + visibility = ["//visibility:public"], + deps = [ + "//envoy/admin/v2alpha:pkg", + "//envoy/api/v2:pkg", + "//envoy/api/v2/auth:pkg", + "//envoy/api/v2/cluster:pkg", + "//envoy/api/v2/core:pkg", + "//envoy/api/v2/endpoint:pkg", + "//envoy/api/v2/listener:pkg", + "//envoy/api/v2/ratelimit:pkg", + "//envoy/api/v2/route:pkg", + "//envoy/config/accesslog/v2:pkg", + "//envoy/config/bootstrap/v2:pkg", + "//envoy/config/cluster/aggregate/v2alpha:pkg", + "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", + "//envoy/config/cluster/redis:pkg", + "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", + "//envoy/config/common/tap/v2alpha:pkg", + "//envoy/config/filter/accesslog/v2:pkg", + "//envoy/config/filter/dubbo/router/v2alpha1:pkg", + "//envoy/config/filter/fault/v2:pkg", + "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", + "//envoy/config/filter/http/aws_lambda/v2alpha:pkg", + "//envoy/config/filter/http/aws_request_signing/v2alpha:pkg", + 
"//envoy/config/filter/http/buffer/v2:pkg", + "//envoy/config/filter/http/cache/v2alpha:pkg", + "//envoy/config/filter/http/compressor/v2:pkg", + "//envoy/config/filter/http/cors/v2:pkg", + "//envoy/config/filter/http/csrf/v2:pkg", + "//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg", + "//envoy/config/filter/http/dynamo/v2:pkg", + "//envoy/config/filter/http/ext_authz/v2:pkg", + "//envoy/config/filter/http/fault/v2:pkg", + "//envoy/config/filter/http/grpc_http1_bridge/v2:pkg", + "//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg", + "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", + "//envoy/config/filter/http/grpc_web/v2:pkg", + "//envoy/config/filter/http/gzip/v2:pkg", + "//envoy/config/filter/http/header_to_metadata/v2:pkg", + "//envoy/config/filter/http/health_check/v2:pkg", + "//envoy/config/filter/http/ip_tagging/v2:pkg", + "//envoy/config/filter/http/jwt_authn/v2alpha:pkg", + "//envoy/config/filter/http/lua/v2:pkg", + "//envoy/config/filter/http/on_demand/v2:pkg", + "//envoy/config/filter/http/original_src/v2alpha1:pkg", + "//envoy/config/filter/http/rate_limit/v2:pkg", + "//envoy/config/filter/http/rbac/v2:pkg", + "//envoy/config/filter/http/router/v2:pkg", + "//envoy/config/filter/http/squash/v2:pkg", + "//envoy/config/filter/http/tap/v2alpha:pkg", + "//envoy/config/filter/http/transcoder/v2:pkg", + "//envoy/config/filter/listener/http_inspector/v2:pkg", + "//envoy/config/filter/listener/original_dst/v2:pkg", + "//envoy/config/filter/listener/original_src/v2alpha1:pkg", + "//envoy/config/filter/listener/proxy_protocol/v2:pkg", + "//envoy/config/filter/listener/tls_inspector/v2:pkg", + "//envoy/config/filter/network/client_ssl_auth/v2:pkg", + "//envoy/config/filter/network/direct_response/v2:pkg", + "//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg", + "//envoy/config/filter/network/echo/v2:pkg", + "//envoy/config/filter/network/ext_authz/v2:pkg", + "//envoy/config/filter/network/http_connection_manager/v2:pkg", + 
"//envoy/config/filter/network/kafka_broker/v2alpha1:pkg", + "//envoy/config/filter/network/local_rate_limit/v2alpha:pkg", + "//envoy/config/filter/network/mongo_proxy/v2:pkg", + "//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg", + "//envoy/config/filter/network/rate_limit/v2:pkg", + "//envoy/config/filter/network/rbac/v2:pkg", + "//envoy/config/filter/network/redis_proxy/v2:pkg", + "//envoy/config/filter/network/sni_cluster/v2:pkg", + "//envoy/config/filter/network/tcp_proxy/v2:pkg", + "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", + "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", + "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", + "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", + "//envoy/config/grpc_credential/v2alpha:pkg", + "//envoy/config/listener/v2:pkg", + "//envoy/config/metrics/v2:pkg", + "//envoy/config/overload/v2alpha:pkg", + "//envoy/config/ratelimit/v2:pkg", + "//envoy/config/rbac/v2:pkg", + "//envoy/config/retry/omit_host_metadata/v2:pkg", + "//envoy/config/retry/previous_priorities:pkg", + "//envoy/config/trace/v2:pkg", + "//envoy/config/trace/v2alpha:pkg", + "//envoy/config/transport_socket/alts/v2alpha:pkg", + "//envoy/config/transport_socket/raw_buffer/v2:pkg", + "//envoy/config/transport_socket/tap/v2alpha:pkg", + "//envoy/config/wasm/v2:pkg", + "//envoy/data/accesslog/v2:pkg", + "//envoy/data/cluster/v2alpha:pkg", + "//envoy/data/core/v2alpha:pkg", + "//envoy/data/dns/v2alpha:pkg", + "//envoy/data/tap/v2alpha:pkg", + "//envoy/service/accesslog/v2:pkg", + "//envoy/service/auth/v2:pkg", + "//envoy/service/discovery/v2:pkg", + "//envoy/service/event_reporting/v2alpha:pkg", + "//envoy/service/load_stats/v2:pkg", + "//envoy/service/metrics/v2:pkg", + "//envoy/service/ratelimit/v2:pkg", + "//envoy/service/status/v2:pkg", + "//envoy/service/tap/v2alpha:pkg", + "//envoy/service/trace/v2:pkg", + "//envoy/type:pkg", + "//envoy/type/matcher:pkg", + "//envoy/type/metadata/v2:pkg", + 
"//envoy/type/tracing/v2:pkg", + ], +) diff --git a/bazel/BUILD b/bazel/BUILD index 0455f48745b3..a58f8c99a8e9 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -127,6 +127,11 @@ config_setting( values = {"define": "ENVOY_CONFIG_COVERAGE=1"}, ) +config_setting( + name = "compdb_build", + values = {"define": "ENVOY_CONFIG_COMPILATION_DATABASE=1"}, +) + config_setting( name = "clang_build", flag_values = { diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index a1b3a9fd6999..7793129376aa 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -70,6 +70,25 @@ The name of the dependency can be found in [the repository locations file.](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl) The path of the local copy has to be absolute path. +For repositories built by `envoy_cmake_external()` in `bazel/foreign_cc/BUILD`, +it is necessary to populate the local copy with some additional Bazel machinery +to support `--override_repository`: +1. Place an empty `WORKSPACE` in the root. +2. Place a `BUILD` file with `filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])` + in the root. + +# Debugging external dependencies + +For all external dependencies, overriding with a local copy as described in the +previous section is a useful tool. + +Below we describe specific tips for obtaining additional debug for specific +dependencies: + +* `libevent`: add `"EVENT__ENABLE_VERBOSE_DEBUG": "on",` to `cache_entries` + in the `event` target in `bazel/foreign_cc/BUILD` for verbose tracing of + libevent processing. + # Distdir - prefetching dependencies Usually Bazel downloads all dependencies during build time. 
But there is a diff --git a/bazel/README.md b/bazel/README.md index 276d06771130..4f1512e9e171 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -591,11 +591,19 @@ Sometimes it's useful to see real system paths in bazel error message output (vs Run `tools/gen_compilation_database.py` to generate a [JSON Compilation Database](https://clang.llvm.org/docs/JSONCompilationDatabase.html). This could be used -with any tools (e.g. clang-tidy) compatible with the format. +with any tools (e.g. clang-tidy) compatible with the format. It is recommended to run this script +with `TEST_TMPDIR` set, so the Bazel artifacts doesn't get cleaned up in next `bazel build` or `bazel test`. The compilation database could also be used to setup editors with cross reference, code completion. For example, you can use [You Complete Me](https://valloric.github.io/YouCompleteMe/) or -[cquery](https://github.com/cquery-project/cquery) with supported editors. +[clangd](https://clangd.llvm.org/) with supported editors. + +For example, use following command to prepare a compilation database: + +``` +TEST_TMPDIR=/tmp tools/gen_compilation_database.py --run_bazel_build +``` + # Running clang-format without docker diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl index 7a61ce6b9cbd..550685f6299a 100644 --- a/bazel/api_binding.bzl +++ b/bazel/api_binding.bzl @@ -3,11 +3,11 @@ def _default_envoy_api_impl(ctx): api_dirs = [ "BUILD", "bazel", - "docs", "envoy", "examples", "test", "tools", + "versioning", ] for d in api_dirs: ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d) diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 53810f440db9..2ac0d75883b1 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -95,6 +95,14 @@ def envoy_cc_library( if tcmalloc_dep: deps += tcmalloc_external_deps(repository) + # Intended for compilation database generation. 
This generates an empty cc + # source file so Bazel generates virtual includes and recognize them as C++. + # Workaround for https://github.com/bazelbuild/bazel/issues/10845. + srcs += select({ + "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], + "//conditions:default": [], + }) + native.cc_library( name = name, srcs = srcs, @@ -130,6 +138,7 @@ def envoy_cc_library( hdrs = hdrs, copts = envoy_copts(repository) + copts, visibility = visibility, + tags = ["nocompdb"], deps = [":" + name], strip_include_prefix = strip_include_prefix, ) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 515f2631cb26..ca0b430c16f0 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -221,6 +221,12 @@ def envoy_cc_test_library( deps = deps + [ repository + "//test/test_common:printers_includes", ] + + # Same as envoy_cc_library + srcs += select({ + "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], + "//conditions:default": [], + }) _envoy_cc_test_infrastructure_library( name, srcs, @@ -264,12 +270,14 @@ def envoy_cc_benchmark_binary( def envoy_benchmark_test( name, benchmark_binary, - data = []): + data = [], + **kargs): native.sh_test( name = name, srcs = ["//bazel:test_for_benchmark_wrapper.sh"], data = [":" + benchmark_binary] + data, args = ["%s/%s" % (native.package_name(), benchmark_binary)], + **kargs ) # Envoy Python test binaries should be specified with this function. 
diff --git a/bazel/external/BUILD b/bazel/external/BUILD index 373cf9f8e1df..11dabbc90cee 100644 --- a/bazel/external/BUILD +++ b/bazel/external/BUILD @@ -21,4 +21,5 @@ genrule( name = "empty_cc", outs = ["empty.cc"], cmd = "touch \"$(@D)/empty.cc\"", + visibility = ["//visibility:public"], ) diff --git a/bazel/external/compiler_rt.BUILD b/bazel/external/compiler_rt.BUILD index e75ac2ef266c..96d90b46ab23 100644 --- a/bazel/external/compiler_rt.BUILD +++ b/bazel/external/compiler_rt.BUILD @@ -2,8 +2,6 @@ licenses(["notice"]) # Apache 2 cc_library( name = "fuzzed_data_provider", - hdrs = ["utils/FuzzedDataProvider.h"], - # This is moving from lib/fuzzer/utils to include/fuzzer after LLVM 9.0. - include_prefix = "compiler_rt/fuzzer", + hdrs = ["fuzzer/utils/FuzzedDataProvider.h"], visibility = ["//visibility:public"], ) diff --git a/bazel/foreign_cc/curl-revert-cmake-minreqver.patch b/bazel/foreign_cc/curl-revert-cmake-minreqver.patch new file mode 100644 index 000000000000..78ba60fdb34b --- /dev/null +++ b/bazel/foreign_cc/curl-revert-cmake-minreqver.patch @@ -0,0 +1,17 @@ +# Curl 7.69.1 introduces a range-bound cmake revisions between 3.0 and 3.16 +# but this causes the Win32 build to be broken (and is unwise as cmake +# has already released 3.17) +diff --git a/CMakeLists.txt b/CMakeLists.txt +index b13616fc7..8b6d77542 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -38,8 +38,7 @@ + # To check: + # (From Daniel Stenberg) The cmake build selected to run gcc with -fPIC on my box while the plain configure script did not. + # (From Daniel Stenberg) The gcc command line use neither -g nor any -O options. As a developer, I also treasure our configure scripts's --enable-debug option that sets a long range of "picky" compiler options. 
+-cmake_minimum_required(VERSION 3.0...3.16 FATAL_ERROR) +- ++cmake_minimum_required(VERSION 3.0 FATAL_ERROR) + set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}") + include(Utilities) + include(Macros) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index be0df3e25b61..b6ea96b38ef6 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -143,8 +143,7 @@ def envoy_dependencies(skip_targets = []): _com_github_envoyproxy_sqlparser() _com_googlesource_chromium_v8() _com_googlesource_quiche() - _org_llvm_llvm() - _com_github_wavm_wavm() + _com_googlesource_googleurl() _com_lightstep_tracer_cpp() _io_opentracing_cpp() _net_zlib() @@ -631,6 +630,8 @@ def _com_github_curl(): build_file_content = BUILD_ALL_CONTENT + """ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy//bazel/foreign_cc:curl"]) """, + patches = ["@envoy//bazel/foreign_cc:curl-revert-cmake-minreqver.patch"], + patch_args = ["-p1"], **location ) native.bind( @@ -682,6 +683,15 @@ def _com_googlesource_quiche(): actual = "@com_googlesource_quiche//:quic_platform_base", ) +def _com_googlesource_googleurl(): + _repository_impl( + name = "com_googlesource_googleurl", + ) + native.bind( + name = "googleurl", + actual = "@com_googlesource_googleurl//url:url", + ) + def _org_llvm_releases_compiler_rt(): _repository_impl( name = "org_llvm_releases_compiler_rt", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 6d804e9e1acf..d695796e1938 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -9,11 +9,11 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz"], ), bazel_toolchains = dict( - sha256 = "e2126599d29f2028e6b267eba273dcc8e7f4a35ff323e9600cf42fb03875b7c6", - strip_prefix = "bazel-toolchains-2.0.0", + sha256 = "1342f84d4324987f63307eb6a5aac2dff6d27967860a129f5cd40f8f9b6fd7dd", + 
strip_prefix = "bazel-toolchains-2.2.0", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/2.0.0/bazel-toolchains-2.0.0.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/2.0.0.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/2.2.0/bazel-toolchains-2.2.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/2.2.0.tar.gz", ], ), build_bazel_rules_apple = dict( @@ -21,10 +21,10 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.19.0/rules_apple.0.19.0.tar.gz"], ), envoy_build_tools = dict( - sha256 = "97f7276eeae150ce84de5406174e87ea82c6c9aad3e3e4ede1067b5d8205c980", - strip_prefix = "envoy-build-tools-0a98f4bd8b3eeeaa11a10f6a4fe5c59e7c2e16df", - # 2019-12-23 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/0a98f4bd8b3eeeaa11a10f6a4fe5c59e7c2e16df.tar.gz"], + sha256 = "c4193e6ab0c93db3e519dc8aeaf588e3dc414620063e00003150f64f03ad1f3f", + strip_prefix = "envoy-build-tools-84ca08de00eedd0ba08e7d5551108d6f03f5d362", + # 2020-03-24 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/84ca08de00eedd0ba08e7d5551108d6f03f5d362.tar.gz"], ), boringssl = dict( sha256 = "a3d4de4f03cb321ef943678d72a045c9a19d26b23d6f4e313f97600c65201a27", @@ -69,15 +69,15 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"], ), com_github_cyan4973_xxhash = dict( - sha256 = "7e93d28e81c3e95ff07674a400001d0cdf23b7842d49b211e5582d00d8e3ac3e", - strip_prefix = "xxHash-0.7.2", - urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.2.tar.gz"], + sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", + strip_prefix = "xxHash-0.7.3", + urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.3.tar.gz"], ), com_github_envoyproxy_sqlparser = dict( - sha256 = 
"8ccd2fe9087d96a91eda0929f3a0bdb5d9f877dd9cdc91f9fcf85385fe70cc14", - strip_prefix = "sql-parser-85904962d8723bbd3ae280dfb13cf4cc563faabc", - # 2020-01-08 - urls = ["https://github.com/envoyproxy/sql-parser/archive/85904962d8723bbd3ae280dfb13cf4cc563faabc.tar.gz"], + sha256 = "b2d3882698cf85b64c87121e208ce0b24d5fe2a00a5d058cf4571f1b25b45403", + strip_prefix = "sql-parser-b14d010afd4313f2372a1cc96aa2327e674cc798", + # 2020-01-10 + urls = ["https://github.com/envoyproxy/sql-parser/archive/b14d010afd4313f2372a1cc96aa2327e674cc798.tar.gz"], ), com_github_mirror_tclap = dict( sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", @@ -85,14 +85,14 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/mirror/tclap/archive/tclap-1-2-1-release-final.tar.gz"], ), com_github_fmtlib_fmt = dict( - sha256 = "4c0741e10183f75d7d6f730b8708a99b329b2f942dad5a9da3385ab92bb4a15c", - strip_prefix = "fmt-5.3.0", - urls = ["https://github.com/fmtlib/fmt/releases/download/5.3.0/fmt-5.3.0.zip"], + sha256 = "f1907a58d5e86e6c382e51441d92ad9e23aea63827ba47fd647eacc0d3a16c78", + strip_prefix = "fmt-6.0.0", + urls = ["https://github.com/fmtlib/fmt/archive/6.0.0.tar.gz"], ), com_github_gabime_spdlog = dict( - sha256 = "160845266e94db1d4922ef755637f6901266731c4cb3b30b45bf41efa0e6ab70", - strip_prefix = "spdlog-1.3.1", - urls = ["https://github.com/gabime/spdlog/archive/v1.3.1.tar.gz"], + sha256 = "afd18f62d1bc466c60bef088e6b637b0284be88c515cedc59ad4554150af6043", + strip_prefix = "spdlog-1.4.0", + urls = ["https://github.com/gabime/spdlog/archive/v1.4.0.tar.gz"], ), com_github_google_libprotobuf_mutator = dict( sha256 = "", @@ -136,10 +136,10 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/opentracing/opentracing-cpp/archive/v1.5.1.tar.gz"], ), com_lightstep_tracer_cpp = dict( - sha256 = "1ed7faaad1deabddb83791b2b7f2ec79d25b47009994c1a8bb6da85244c60e4f", - strip_prefix = "lightstep-tracer-cpp-665d8388aafbbbb44994f4fa12b3b65b9dcea613", - # 2019-03-11 - urls = 
["https://github.com/lightstep/lightstep-tracer-cpp/archive/665d8388aafbbbb44994f4fa12b3b65b9dcea613.tar.gz"], + sha256 = "0e99716598c010e56bc427ea3482be5ad2c534be8b039d172564deec1264a213", + strip_prefix = "lightstep-tracer-cpp-3efe2372ee3d7c2138d6b26e542d757494a7938d", + # 2020-03-24 + urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/3efe2372ee3d7c2138d6b26e542d757494a7938d.tar.gz"], ), com_github_datadog_dd_opentracing_cpp = dict( sha256 = "6dc1088ab7f788b6c849fbaa6300517c8fdf88991a70b778be79c284c36857bf", @@ -178,9 +178,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.6.3.tar.gz"], ), com_github_msgpack_msgpack_c = dict( - sha256 = "fbaa28c363a316fd7523f31d1745cf03eab0d1e1ea5a1c60aa0dffd4ce551afe", - strip_prefix = "msgpack-3.2.0", - urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-3.2.0/msgpack-3.2.0.tar.gz"], + sha256 = "433cbcd741e1813db9ae4b2e192b83ac7b1d2dd7968a3e11470eacc6f4ab58d2", + strip_prefix = "msgpack-3.2.1", + urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-3.2.1/msgpack-3.2.1.tar.gz"], ), com_github_google_jwt_verify = dict( sha256 = "d422a6eadd4bcdd0f9b122cd843a4015f8b18aebea6e1deb004bd4d401a8ef92", @@ -189,9 +189,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/jwt_verify_lib/archive/40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac.tar.gz"], ), com_github_nodejs_http_parser = dict( - sha256 = "ef26268c54c8084d17654ba2ed5140bffeffd2a040a895ffb22a6cca3f6c613f", - strip_prefix = "http-parser-2.9.0", - urls = ["https://github.com/nodejs/http-parser/archive/v2.9.0.tar.gz"], + sha256 = "8fa0ab8770fd8425a9b431fdbf91623c4d7a9cdb842b9339289bd2b0b01b0d3d", + strip_prefix = "http-parser-2.9.3", + urls = ["https://github.com/nodejs/http-parser/archive/v2.9.3.tar.gz"], ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", @@ -235,10 +235,10 @@ REPOSITORY_LOCATIONS = dict( urls = 
["https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protobuf-all-3.10.1.tar.gz"], ), grpc_httpjson_transcoding = dict( - sha256 = "a447458b47ea4dc1d31499f555769af437c5d129d988ec1e13d5fdd0a6a36b4e", - strip_prefix = "grpc-httpjson-transcoding-2feabd5d64436e670084091a937855972ee35161", - # 2019-08-28 - urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/2feabd5d64436e670084091a937855972ee35161.tar.gz"], + sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5", + strip_prefix = "grpc-httpjson-transcoding-faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6", + # 2020-03-02 + urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6.tar.gz"], ), io_bazel_rules_go = dict( sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b", @@ -269,26 +269,31 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/WAVM/WAVM/archive/1ec06cd202a922015c9041c5ed84f875453c4dc7.tar.gz"], ), io_opencensus_cpp = dict( - sha256 = "a9ba6027436cfa1264860c6be602da7633d9a1f9abcb8838f2ae6bda8c2c14f6", - strip_prefix = "opencensus-cpp-13b1a2f29f541b6b2c4cb8bc3f6fbf3589d44227", - # 2019-12-01 - urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/13b1a2f29f541b6b2c4cb8bc3f6fbf3589d44227.tar.gz"], + sha256 = "193ffb4e13bd7886757fd22b61b7f7a400634412ad8e7e1071e73f57bedd7fc6", + strip_prefix = "opencensus-cpp-04ed0211931f12b03c1a76b3907248ca4db7bc90", + # 2020-03-24 + urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/04ed0211931f12b03c1a76b3907248ca4db7bc90.tar.gz"], ), com_github_curl = dict( - sha256 = "1dd7604e418b0b9a9077f62f763f6684c1b092a7bc17e3f354b8ad5c964d7358", - strip_prefix = "curl-7.68.0", - urls = ["https://github.com/curl/curl/releases/download/curl-7_68_0/curl-7.68.0.tar.gz"], + sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", + strip_prefix = "curl-7.69.1", + urls = 
["https://github.com/curl/curl/releases/download/curl-7_69_1/curl-7.69.1.tar.gz"], ), com_googlesource_chromium_v8 = dict( # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. - sha256 = "d166eb74f15c97e4df9cb63ee4ed7f0af67fc36024f01f9286d9fe52b84530dd", - urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.1.307.23.tar.gz"], + sha256 = "03ff00e41cf259db473dfade9548493e4a2372c0b701a66cd7ff76215bd55a64", + urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.1.307.28.tar.gz"], ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/4f6ba16cf16505f12dc8d2f23cfc6e13c1aa5806.tar.gz - sha256 = "2990ca4434a6fc1b63560b1aa000f5765c174ad906642e123a92498f1ad6c03d", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/4f6ba16cf16505f12dc8d2f23cfc6e13c1aa5806.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz + sha256 = "75af53154402e1654cfd32d8aaeed5fab4dbb79d3cab8c9866019d5369c1889e", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz"], + ), + com_googlesource_googleurl = dict( + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz + sha256 = "b40cd22cadba577b7281a76db66f6a66dd744edbad8cc2c861c2c976ef721e4d", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz"], ), com_google_cel_cpp = dict( sha256 = "d942a8d2e5831bcf7f5b1e99b07f90534eb082f40fd9bda05bcc24ff9c0c3571", @@ -297,9 +302,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/cel-cpp/archive/a9eec4686b72c28980a09fe2e253ec897a781c32.tar.gz"], ), com_googlesource_code_re2 = dict( - sha256 = 
"7268e1b4254d9ffa5ccf010fee954150dbb788fd9705234442e7d9f0ee5a42d3", - strip_prefix = "re2-2019-12-01", - urls = ["https://github.com/google/re2/archive/2019-12-01.tar.gz"], + sha256 = "04ee2aaebaa5038554683329afc494e684c30f82f2a1e47eb62450e59338f84d", + strip_prefix = "re2-2020-03-03", + urls = ["https://github.com/google/re2/archive/2020-03-03.tar.gz"], ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but # provided as part of the compiler-rt source distribution. We can't use the @@ -307,7 +312,7 @@ REPOSITORY_LOCATIONS = dict( org_llvm_releases_compiler_rt = dict( sha256 = "56e4cd96dd1d8c346b07b4d6b255f976570c6f2389697347a6c3dcb9e820d10e", # Only allow peeking at fuzzer related files for now. - strip_prefix = "compiler-rt-9.0.0.src/lib/fuzzer", + strip_prefix = "compiler-rt-9.0.0.src/lib", urls = ["http://releases.llvm.org/9.0.0/compiler-rt-9.0.0.src.tar.xz"], ), fuzzit_linux = dict( diff --git a/ci/check_and_fix_format.sh b/ci/check_and_fix_format.sh index 7ae4a7ca2d0b..7d5fe0a54d18 100755 --- a/ci/check_and_fix_format.sh +++ b/ci/check_and_fix_format.sh @@ -4,6 +4,12 @@ set -e DIFF_OUTPUT="${DIFF_OUTPUT:-/build/fix_format.diff}" +# We set this for two reasons. First, we want to ensure belt-and-braces that we check these formats +# in CI in case the skip-on-file-change heuristics in proto_format.sh etc. are buggy. Second, this +# prevents AZP cache weirdness. 
+export FORCE_PROTO_FORMAT=yes +export FORCE_PYTHON_FORMAT=yes + function fix { set +e ci/do_ci.sh fix_format diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 1d6029102847..4516ec135fe8 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -58,6 +58,10 @@ function cp_binary_for_image_build() { # Copy for azp which doesn't preserve permissions, creating a tar archive tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_SRCDIR}" build_"$1" build_"$1"_stripped + + # Remove binaries to save space, only if BUILD_REASON exists (running in AZP) + [[ -z "${BUILD_REASON}" ]] || \ + rm -rf "${ENVOY_SRCDIR}"/build_"$1" "${ENVOY_SRCDIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy } function bazel_binary_build() { @@ -276,12 +280,10 @@ elif [[ "$CI_TARGET" == "bazel.fuzzit" ]]; then elif [[ "$CI_TARGET" == "fix_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain - echo "protoxform_test..." - ./tools/protoxform/protoxform_test.sh echo "fix_format..." ./tools/code_format/check_format.py fix ./tools/code_format/format_python_tools.sh fix - ./tools/proto_format/proto_format.sh fix + ./tools/proto_format/proto_format.sh fix --test exit 0 elif [[ "$CI_TARGET" == "check_format" ]]; then # proto_format.sh needs to build protobuf. @@ -291,7 +293,7 @@ elif [[ "$CI_TARGET" == "check_format" ]]; then echo "check_format..." ./tools/code_format/check_format.py check ./tools/code_format/format_python_tools.sh check - ./tools/proto_format/proto_format.sh check + ./tools/proto_format/proto_format.sh check --test exit 0 elif [[ "$CI_TARGET" == "check_repositories" ]]; then echo "check_repositories..." 
diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index 0bdec2c20030..56745372e0c0 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -8,6 +8,7 @@ export LLVM_CONFIG=${LLVM_CONFIG:-llvm-config} LLVM_PREFIX=${LLVM_PREFIX:-$(${LLVM_CONFIG} --prefix)} CLANG_TIDY=${CLANG_TIDY:-$(${LLVM_CONFIG} --bindir)/clang-tidy} CLANG_APPLY_REPLACEMENTS=${CLANG_APPLY_REPLACEMENTS:-$(${LLVM_CONFIG} --bindir)/clang-apply-replacements} +FIX_YAML=clang-tidy-fixes.yaml # Quick syntax check of .clang-tidy. ${CLANG_TIDY} -dump-config > /dev/null 2> clang-tidy-config-errors.txt @@ -57,20 +58,30 @@ function filter_excludes() { if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running full clang-tidy..." - "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ + python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ -clang-tidy-binary=${CLANG_TIDY} \ -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \ + -export-fixes=${FIX_YAML} \ + -j ${NUM_CPUS:-0} -p 1 -quiet \ ${APPLY_CLANG_TIDY_FIXES:+-fix} elif [[ "${BUILD_REASON}" != "PullRequest" ]]; then echo "Running clang-tidy-diff against previous commit..." git diff HEAD^ | filter_excludes | \ - "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ + python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ -clang-tidy-binary=${CLANG_TIDY} \ - -p 1 + -export-fixes=${FIX_YAML} \ + -j ${NUM_CPUS:-0} -p 1 -quiet else echo "Running clang-tidy-diff against master branch..." 
git diff "remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" | filter_excludes | \ - "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ + python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ -clang-tidy-binary=${CLANG_TIDY} \ - -p 1 + -export-fixes=${FIX_YAML} \ + -j ${NUM_CPUS:-0} -p 1 -quiet +fi + +if [[ -s "${FIX_YAML}" ]]; then + echo "clang-tidy check failed, potentially fixed by clang-apply-replacements:" + cat ${FIX_YAML} + exit 1 fi diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index 930228d77c84..9677009b9965 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -28,6 +28,6 @@ BAZEL_BUILD_OPTIONS="-c opt --config=msvc-cl --show_task_finish --verbose_failur bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //bazel/... --build_tag_filters=-skip_on_windows -bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static +bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static --build_tag_filters=-skip_on_windows # bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows --build_tests_only --test_summary=terse --test_output=errors diff --git a/docs/build.sh b/docs/build.sh index ab6968d36142..ba270fa3f279 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -50,11 +50,12 @@ pip3 install -r "${SCRIPT_DIR}"/requirements.txt # files still. rm -rf bazel-bin/external/envoy_api_canonical -# This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. -BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all --strategy=protodoc=sandboxed,local" - export EXTENSION_DB_PATH="$(realpath "${BUILD_DIR}/extension_db.json")" +# This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. +BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all --strategy=protodoc=sandboxed,local + --action_env=ENVOY_BLOB_SHA --action_env=EXTENSION_DB_PATH" + # Generate extension database. 
This maps from extension name to extension # metadata, based on the envoy_cc_extension() Bazel target attributes. ./docs/generate_extension_db.py "${EXTENSION_DB_PATH}" @@ -70,12 +71,11 @@ function generate_api_rst() { # Generate the extensions docs bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api_canonical//:"${API_VERSION}"_protos --aspects \ - tools/protodoc/protodoc.bzl%protodoc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED=1 \ - --action_env=ENVOY_BLOB_SHA --action_env=EXTENSION_DB_PATH="${EXTENSION_DB_PATH}" --host_force_python=PY3 + tools/protodoc/protodoc.bzl%protodoc_aspect --output_groups=rst # Fill in boiler plate for extensions that have google.protobuf.Empty as their # config. - bazel run ${BAZEL_BUILD_OPTIONS} //tools/protodoc:generate_empty -- \ + bazel run ${BAZEL_BUILD_OPTIONS} //tools/protodoc:generate_empty \ "${PWD}"/docs/empty_extensions.json "${PWD}/${GENERATED_RST_DIR}"/api-"${API_VERSION}"/config # We do ** matching below to deal with Bazel cache blah (source proto artifacts diff --git a/docs/root/api-v2/common_messages/common_messages.rst b/docs/root/api-v2/common_messages/common_messages.rst index a284078496bf..853c1604f8cc 100644 --- a/docs/root/api-v2/common_messages/common_messages.rst +++ b/docs/root/api-v2/common_messages/common_messages.rst @@ -12,6 +12,7 @@ Common messages ../api/v2/discovery.proto ../api/v2/core/config_source.proto ../api/v2/core/grpc_service.proto + ../api/v2/core/grpc_method_list.proto ../api/v2/core/http_uri.proto ../api/v2/core/socket_option.proto ../api/v2/auth/cert.proto diff --git a/docs/root/api-v2/data/data.rst b/docs/root/api-v2/data/data.rst index 0f5857448e86..2a4a2fea05e1 100644 --- a/docs/root/api-v2/data/data.rst +++ b/docs/root/api-v2/data/data.rst @@ -6,6 +6,7 @@ Envoy data :maxdepth: 2 accesslog/accesslog - core/core cluster/cluster + core/core + dns/dns tap/tap diff --git a/docs/root/api-v2/data/dns/dns.rst b/docs/root/api-v2/data/dns/dns.rst new file mode 100644 index 
000000000000..43e44e4442db --- /dev/null +++ b/docs/root/api-v2/data/dns/dns.rst @@ -0,0 +1,8 @@ +Extensions objects +================== + +.. toctree:: + :glob: + :maxdepth: 2 + + v2alpha/* diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index 82ac2138e78b..faea72f757d6 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -12,6 +12,7 @@ Common messages ../service/discovery/v3/discovery.proto ../config/core/v3/config_source.proto ../config/core/v3/grpc_service.proto + ../config/core/v3/grpc_method_list.proto ../config/core/v3/http_uri.proto ../config/core/v3/socket_option.proto ../extensions/common/ratelimit/v3/ratelimit.proto diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index 3725a558c7c8..b7712dcb58bf 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -15,3 +15,4 @@ Extensions cluster/cluster grpc_credential/grpc_credential retry/retry + wasm/wasm diff --git a/docs/root/api-v3/config/filter/udp/udp.rst b/docs/root/api-v3/config/filter/udp/udp.rst index 9728ddad1497..beaeaf857fc6 100644 --- a/docs/root/api-v3/config/filter/udp/udp.rst +++ b/docs/root/api-v3/config/filter/udp/udp.rst @@ -6,3 +6,4 @@ UDP listener filters :maxdepth: 2 */v2alpha/* + ../../../extensions/filter/udp/*/v3alpha/* diff --git a/docs/root/api-v3/config/wasm/wasm.rst b/docs/root/api-v3/config/wasm/wasm.rst new file mode 100644 index 000000000000..1f6de3ecd332 --- /dev/null +++ b/docs/root/api-v3/config/wasm/wasm.rst @@ -0,0 +1,8 @@ +WASM +==== + +.. 
toctree:: + :glob: + :maxdepth: 2 + + v3/* diff --git a/docs/root/api-v3/data/data.rst b/docs/root/api-v3/data/data.rst index 0f5857448e86..2a4a2fea05e1 100644 --- a/docs/root/api-v3/data/data.rst +++ b/docs/root/api-v3/data/data.rst @@ -6,6 +6,7 @@ Envoy data :maxdepth: 2 accesslog/accesslog - core/core cluster/cluster + core/core + dns/dns tap/tap diff --git a/docs/root/api-v3/data/dns/dns.rst b/docs/root/api-v3/data/dns/dns.rst new file mode 100644 index 000000000000..da3fe3d1bf13 --- /dev/null +++ b/docs/root/api-v3/data/dns/dns.rst @@ -0,0 +1,8 @@ +Extensions objects +================== + +.. toctree:: + :glob: + :maxdepth: 2 + + v3/* diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index a073a38d5227..d70345971f01 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -23,6 +23,7 @@ HTTP proxies should additionally configure: * :ref:`HTTP/2 maximum concurrent streams limit ` to 100, * :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, * :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. +* :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. The following is a YAML example of the above recommendation. @@ -83,6 +84,7 @@ The following is a YAML example of the above recommendation. 
use_remote_address: true common_http_protocol_options: idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST http2_protocol_options: max_concurrent_streams: 100 initial_stream_window_size: 65536 # 64 KiB diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 704177275234..d27c4fe2d3ec 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -514,6 +514,9 @@ Supported variable names are: %DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. +%DOWNSTREAM_LOCAL_PORT% + Similar to **%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%**, but only extracts the port portion of the **%DOWNSTREAM_LOCAL_ADDRESS%** + %DOWNSTREAM_LOCAL_URI_SAN% HTTP The URIs present in the SAN of the local certificate used to establish the downstream TLS connection. diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index d58c4bc359b8..cc8b9800d8b6 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -111,8 +111,10 @@ All http1 statistics are rooted at *http1.* :header: Name, Type, Description :widths: 1, 1, 2 + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/1 encoding response_flood, Counter, Total number of connections closed due to response flooding + requests_rejected_with_underscores_in_headers, Counter, Total number of rejected requests due to header names containing underscores.
This action is configured by setting the :ref:`headers_with_underscores_action config setting `. Http2 codec statistics ~~~~~~~~~~~~~~~~~~~~~~ @@ -123,6 +125,7 @@ All http2 statistics are rooted at *http2.* :header: Name, Type, Description :widths: 1, 1, 2 + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value `. headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. @@ -130,6 +133,7 @@ All http2 statistics are rooted at *http2.* inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_updateframes_per_data_frame_sent config setting `. outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. The limit is configured by setting the :ref:`max_outbound_frames config setting `. outbound_control_flood, Counter, "Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `." + requests_rejected_with_underscores_in_headers, Counter, Total number of rejected requests due to header names containing underscores.
This action is configured by setting the :ref:`headers_with_underscores_action config setting `. rx_messaging_error, Counter, Total number of invalid received frames that violated `section 8 `_ of the HTTP/2 spec. This will result in a *tx_reset* rx_reset, Counter, Total number of reset stream frames received by Envoy too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers diff --git a/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst b/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst index 77cf179cdcd5..f5f1467a738e 100644 --- a/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst +++ b/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst @@ -34,9 +34,11 @@ Calculating the minRTT ^^^^^^^^^^^^^^^^^^^^^^ The minRTT is periodically measured by only allowing a very low outstanding request count to an -upstream cluster and measuring the latency under these ideal conditions. The length of this minRTT -calculation window is variable depending on the number of requests the filter is configured to -aggregate to represent the expected latency of an upstream. +upstream cluster and measuring the latency under these ideal conditions. The calculation is also +triggered in scenarios where the concurrency limit is determined to be the minimum possible value +for 5 consecutive sampling windows. The length of this minRTT calculation window is variable +depending on the number of requests the filter is configured to aggregate to represent the expected +latency of an upstream. A configurable *jitter* value is used to randomly delay the start of the minRTT calculation window by some amount of time. 
This is not necessary and can be disabled; however, it is recommended to diff --git a/docs/root/configuration/http/http_filters/aws_lambda_filter.rst b/docs/root/configuration/http/http_filters/aws_lambda_filter.rst index 1e59f96a4e82..b7a4ac76e21c 100644 --- a/docs/root/configuration/http/http_filters/aws_lambda_filter.rst +++ b/docs/root/configuration/http/http_filters/aws_lambda_filter.rst @@ -20,12 +20,80 @@ If :ref:`payload_passthrough ` -is set to ``false``, then the HTTP request is transformed to a JSON (the details of the JSON transformation will be -documented once that feature is implemented). +is set to ``false``, then the HTTP request is transformed to a JSON payload with the following schema: + +.. code-block:: + + { + "rawPath": "/path/to/resource", + "method": "GET|POST|HEAD|...", + "headers": {"header-key": "header-value", ... }, + "queryStringParameters": {"key": "value", ...}, + "body": "...", + "isBase64Encoded": true|false + } + +- ``rawPath`` is the HTTP request resource path (including the query string) +- ``method`` is the HTTP request method. For example ``GET``, ``PUT``, etc. +- ``headers`` are the HTTP request headers. If multiple headers share the same name, their values are + coalesced into a single comma-separated value. +- ``queryStringParameters`` are the HTTP request query string parameters. If multiple parameters share the same name, + the last one wins. That is, parameters are *not* coalesced into a single value if they share the same key name. +- ``body`` the body of the HTTP request is base64-encoded by the filter if the ``content-type`` header exists and is *not* one of the following: + + - text/* + - application/json + - application/xml + - application/javascript + +Otherwise, the body of HTTP request is added to the JSON payload as is. + +On the other end, the response of the Lambda function must conform to the following schema: + +.. code-block:: + + { + "statusCode": ... + "headers": {"header-key": "header-value", ... 
}, + "cookies": ["key1=value1; HttpOnly; ...", "key2=value2; Secure; ...", ...], + "body": "...", + "isBase64Encoded": true|false + } + +- The ``statusCode`` field is an integer used as the HTTP response code. If this key is missing, Envoy returns a ``200 + OK``. +- The ``headers`` are used as the HTTP response headers. +- The ``cookies`` are used as ``Set-Cookie`` response headers. Unlike the request headers, cookies are _not_ part of the + response headers because the ``Set-Cookie`` header cannot contain more than one value per the `RFC`_. Therefore, Each + key/value pair in this JSON array will translate to a single ``Set-Cookie`` header. +- The ``body`` is base64-decoded if it is marked as base64-encoded and sent as the body of the HTTP response. + +.. _RFC: https://tools.ietf.org/html/rfc6265#section-4.1 + +.. note:: + + The target cluster must have its endpoint set to the `regional Lambda endpoint`_. Use the same region as the Lambda + function. + + AWS IAM credentials must be defined in either environment variables, EC2 metadata or ECS task metadata. + + +.. _regional Lambda endpoint: https://docs.aws.amazon.com/general/latest/gr/lambda-service.html The filter supports :ref:`per-filter configuration `. -Below are some examples the show how the filter can be used in different deployment scenarios. + +If you use the per-filter configuration, the target cluster _must_ have the following metadata: + +.. code-block:: yaml + + metadata: + filter_metadata: + com.amazonaws.lambda: + egress_gateway: true + + +Below are some examples that show how the filter can be used in different deployment scenarios. 
Example configuration --------------------- diff --git a/docs/root/configuration/http/http_filters/grpc_stats_filter.rst b/docs/root/configuration/http/http_filters/grpc_stats_filter.rst index 6c17f7e4c16c..78ab88624dff 100644 --- a/docs/root/configuration/http/http_filters/grpc_stats_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_stats_filter.rst @@ -11,11 +11,17 @@ gRPC Statistics This is a filter which enables telemetry of gRPC calls. Additionally, the filter detects message boundaries in streaming gRPC calls and emits the message -counts for both the request and the response. The filter emits statistics in -the *cluster..grpc.* namespace. +counts for both the request and the response. More info: wire format in `gRPC over HTTP/2 `_. +The filter emits statistics in the *cluster..grpc.* namespace. Depending on the +configuration, the stats may be prefixed with `..`; the stats in the table below +are shown in this form. See the documentation for +:ref:`individual_method_stats_allowlist ` +and :ref:`stats_for_all_methods `. + + .. csv-table:: :header: Name, Type, Description :widths: 1, 1, 2 diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 475fd9e93dd4..d1a5d17f1a40 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -390,6 +390,11 @@ owning HTTP connection manager. rq_reset_after_downstream_response_started, Counter, Total requests that were reset after downstream response had started rq_retry_skipped_request_not_complete, Counter, Total retries that were skipped as the request is not yet complete +.. 
_config_http_filters_router_vcluster_stats: + +Virtual Clusters +^^^^^^^^^^^^^^^^ + Virtual cluster statistics are output in the *vhost..vcluster..* namespace and include the following statistics: @@ -400,7 +405,13 @@ statistics: upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" + upstream_rq_retry, Counter, Total request retries + upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries ` + upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budgets ` + upstream_rq_retry_success, Counter, Total request retry successes upstream_rq_time, Histogram, Request time milliseconds + upstream_rq_timeout, Counter, Total requests that timed out waiting for a response + upstream_rq_total, Counter, Total requests initiated by the router to the upstream Runtime ------- diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst new file mode 100644 index 000000000000..25b667ff40f2 --- /dev/null +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -0,0 +1,61 @@ +.. _config_udp_listener_filters_dns_filter: + +DNS Filter +========== + +.. attention:: + + DNS Filter is under active development and should be considered alpha and not production ready. + +* :ref:`v2 API reference ` +* This filter should be configured with the name *envoy.filters.udp_listener.dns_filter* + +Overview +-------- + +The DNS filter allows Envoy to respond to DNS queries as an authoritative server for any configured +domains. The filter's configuration specifies the names and addresses for which Envoy will answer +as well as the configuration needed to send queries externally for unknown domains. + +The filter supports :ref:`per-filter configuration +`. 
+An Example configuration follows that illustrates how the filter can be used. + +Example Configuration +--------------------- + +.. code-block:: yaml + + listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + "@type": "type.googleapis.com/envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig" + stat_prefix: "dns_filter_prefix" + server_config: + inline_dns_table: + external_retry_count: 3 + known_suffixes: + - suffix: "domain1.com" + - suffix: "domain2.com" + - suffix: "domain3.com" + virtual_domains: + - name: "www.domain1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 + - name: "www.domain2.com" + endpoint: + address_list: + address: + - 2001:8a:c1::2800:7 + - name: "www.domain3.com" + endpoint: + address_list: + address: + - 10.0.3.1 + + +In this example, Envoy is configured to respond to client queries for three domains. For any +other query, it will forward upstream to external resolvers. diff --git a/docs/root/configuration/listeners/udp_filters/udp_filters.rst b/docs/root/configuration/listeners/udp_filters/udp_filters.rst index 1665052de2b6..0a9a2017987d 100644 --- a/docs/root/configuration/listeners/udp_filters/udp_filters.rst +++ b/docs/root/configuration/listeners/udp_filters/udp_filters.rst @@ -9,3 +9,5 @@ Envoy has the following builtin UDP listener filters. :maxdepth: 2 udp_proxy + dns_filter + diff --git a/docs/root/configuration/observability/access_log.rst b/docs/root/configuration/observability/access_log.rst index 269775de44af..21b22885e178 100644 --- a/docs/root/configuration/observability/access_log.rst +++ b/docs/root/configuration/observability/access_log.rst @@ -256,7 +256,7 @@ The following command operators are supported: * **UH**: No healthy upstream hosts in upstream cluster in addition to 503 response code. * **UF**: Upstream connection failure in addition to 503 response code. * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. 
- * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code. + * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code, or no matching filter chain for a downstream connection. * **URX**: The request was rejected because the :ref:`upstream retry limit (HTTP) ` or :ref:`maximum connect attempts (TCP) ` was reached. HTTP only * **DC**: Downstream connection termination. @@ -351,6 +351,9 @@ The following command operators are supported: %DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. +%DOWNSTREAM_LOCAL_PORT% + Similar to **%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%**, but only extracts the port portion of the **%DOWNSTREAM_LOCAL_ADDRESS%** + %REQ(X?Y):Z% HTTP An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index 3aca1fb17253..a3a3feab1a9c 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -23,7 +23,7 @@ Server related statistics are rooted at *server.* with following statistics: state, Gauge, Current :ref:`State ` of the Server. parent_connections, Gauge, Total connections of the old Envoy process on hot restart total_connections, Gauge, Total connections of both new and old Envoy processes - version, Gauge, Integer represented version number based on SCM revision or :ref:stats_server_version_override` ` if set. + version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override ` if set. days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire hot_restart_epoch, Gauge, Current hot restart epoch -- an integer passed via command line flag `--restart-epoch` usually indicating generation. 
hot_restart_generation, Gauge, Current hot restart generation -- like hot_restart_epoch but computed automatically by incrementing from parent. diff --git a/docs/root/configuration/security/secret.rst b/docs/root/configuration/security/secret.rst index 2772acd899b5..b1b3e1ec33fc 100644 --- a/docs/root/configuration/security/secret.rst +++ b/docs/root/configuration/security/secret.rst @@ -33,7 +33,7 @@ SDS Configuration *SdsSecretConfig* is used in two fields in :ref:`CommonTlsContext `. The first field is *tls_certificate_sds_secret_configs* to use SDS to get :ref:`TlsCertificate `. The second field is *validation_context_sds_secret_config* to use SDS to get :ref:`CertificateValidationContext `. -Examples one: static_resource +Example one: static_resource ----------------------------- This example show how to configure secrets in the static_resource: @@ -88,7 +88,9 @@ This example show how to configure secrets in the static_resource: In this example, certificates are specified in the bootstrap static_resource, they are not fetched remotely. In the config, *secrets* static resource has 3 secrets: **client_cert**, **server_cert** and **validation_context**. In the cluster config, one of hosts uses **client_cert** in its *tls_certificate_sds_secret_configs*. In the listeners section, one of them uses **server_cert** in its *tls_certificate_sds_secret_configs* and **validation_context** for its *validation_context_sds_secret_config*. -Examples two: SDS server +.. _sds_server_example: + +Example two: SDS server ------------------------ This example shows how to configure secrets fetched from remote SDS servers: @@ -173,6 +175,68 @@ This example shows how to configure secrets fetched from remote SDS servers: For illustration, above example uses three methods to access the SDS server. A gRPC SDS server can be reached by Unix Domain Socket path **/tmp/uds_path** and **127.0.0.1:8234** by mTLS. 
It provides three secrets, **client_cert**, **server_cert** and **validation_context**. In the config, cluster **example_cluster** certificate **client_cert** is configured to use Google gRPC with UDS to talk to the SDS server. The Listener needs to fetch **server_cert** and **validation_context** from the SDS server. The **server_cert** is using Envoy gRPC with cluster **sds_server_mtls** configured with client certificate to use mTLS to talk to SDS server. The **validate_context** is using Envoy gRPC with cluster **sds_server_uds** configured with UDS path to talk to the SDS server. +.. _xds_certificate_rotation: + +Example three: certificate rotation for xDS gRPC connection +------------------------------------------------------------ + +Managing certificates for xDS gRPC connection between Envoy and xDS server introduces a bootstrapping problem: SDS server cannot manage certificates that are required to connect to the server. + +This example shows how to set up xDS connection by sourcing SDS configuration from the filesystem. +The certificate and key files are watched with inotify and reloaded automatically without restart. +In contrast, :ref:`sds_server_example` requires a restart to reload xDS certificates and key after update. + +.. 
code-block:: yaml + + clusters: + - name: control_plane + type: LOGICAL_DNS + connect_timeout: 1s + load_assignment: + cluster_name: control_plane + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: controlplane + port_value: 8443 + http2_protocol_options: {} + transport_socket: + name: "envoy.transport_sockets.tls" + typed_config: + "@type": "type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext" + common_tls_context: + tls_certificate_sds_secret_configs: + sds_config: + path: /etc/envoy/tls_certificate_sds_secret.yaml + validation_context_sds_secret_config: + sds_config: + path: /etc/envoy/validation_context_sds_secret.yaml + +Paths to client certificate, including client's certificate chain and private key are given in SDS config file ``/etc/envoy/tls_certificate_sds_secret.yaml``: + +.. code-block:: yaml + + resources: + - "@type": "type.googleapis.com/envoy.api.v2.auth.Secret" + tls_certificate: + certificate_chain: + filename: /certs/sds_cert.pem + private_key: + filename: /certs/sds_key.pem + +Path to CA certificate bundle for validating the xDS server certificate is given in SDS config file ``/etc/envoy/validation_context_sds_secret.yaml``: + +.. code-block:: yaml + + resources: + - "@type": "type.googleapis.com/envoy.api.v2.auth.Secret" + validation_context: + trusted_ca: + filename: /certs/cacert.pem + + Statistics ---------- SSL socket factory outputs following SDS related statistics. They are all counter type. @@ -194,4 +258,3 @@ For upstream clusters, they are in the *cluster..client_ssl_socket ssl_context_update_by_sds, Total number of ssl context has been updated. upstream_context_secrets_not_ready, Total number of upstream connections reset due to empty ssl certificate. 
- diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index f234ed3d51a7..e58a1d32c90c 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -73,8 +73,9 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_rq_rx_reset, Counter, Total requests that were reset remotely upstream_rq_tx_reset, Counter, Total requests that were reset locally upstream_rq_retry, Counter, Total request retries + upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries ` upstream_rq_retry_success, Counter, Total request retry successes - upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the retry budget + upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budget ` upstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from upstream upstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from upstream upstream_flow_control_backed_up_total, Counter, Total number of times the upstream connection backed up and paused reads from downstream diff --git a/docs/root/extending/extending.rst b/docs/root/extending/extending.rst index 51198fefacc4..1e63c9e99b94 100644 --- a/docs/root/extending/extending.rst +++ b/docs/root/extending/extending.rst @@ -18,6 +18,7 @@ types including: * :ref:`Retry implementations ` * :ref:`Stat sinks ` * :ref:`Tracers ` +* :ref:`Request ID ` * Transport sockets * BoringSSL private key methods diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 65f3e9efce2c..4cdca3a57167 100644 --- 
a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -41,7 +41,7 @@ context request/stream is interchangeable. * The HTTP connection manager :ref:`request_timeout ` is the amount of time the connection manager will allow for the *entire request stream* to be - received by the client. + received from the client. .. attention:: diff --git a/docs/root/faq/load_balancing/transient_failures.rst b/docs/root/faq/load_balancing/transient_failures.rst index 109a630eb0dc..0be611892982 100644 --- a/docs/root/faq/load_balancing/transient_failures.rst +++ b/docs/root/faq/load_balancing/transient_failures.rst @@ -3,30 +3,32 @@ How do I handle transient failures? =================================== -One of the biggest advantages of using Envoy in a service mesh is that it frees up services -from implementing complex resiliency features like circuit breaking, outlier detection and retries -that enable services to be resilient to realities such as rolling upgrades, dynamic infrastructure, -and network failures. Having these features implemented at Envoy not only improves the availability -and resiliency of services but also brings in consistency in terms of the behaviour and observability. +One of the biggest advantages of using Envoy in a service mesh is that it frees up services +from implementing complex resiliency features like circuit breaking, outlier detection and retries +that enable services to be resilient to realities such as rolling upgrades, dynamic infrastructure, +and network failures. Having these features implemented at Envoy not only improves the availability +and resiliency of services but also brings in consistency in terms of the behaviour and observability. -This section explains at a high level the configuration supported by Envoy and how these features can be +This section explains at a high level the configuration supported by Envoy and how these features can be used together to handle these scenarios. 
Circuit Breaking ---------------- -:ref:`Circuit Breaking ` is a critical component of distributed systems. +:ref:`Circuit Breaking ` is a critical component of distributed systems. Circuit breaking lets applications configure failure thresholds that ensure safe maximums, allowing components to fail quickly and apply back pressure as soon as possible. Applying correct circuit breaking thresholds helps -to save resources which otherwise are wasted in waiting for requests (timeouts) or retrying requests unnecessarily. +to save resources which otherwise are wasted in waiting for requests (timeouts) or retrying requests unnecessarily. One of the main advantages of the circuit breaking implementation in Envoy is that the circuit breaking limits are applied at the network level. +.. _common_configuration_transient_failures_retries: + Retries ------- -Automatic :ref:`request retries ` is another method of ensuring service resilience. Request retries should -typically be used to guard against transient failures. Envoy supports very rich set of configurable parameters that dictate what type +Automatic :ref:`request retries ` is another method of ensuring service resilience. Request retries should +typically be used to guard against transient failures. Envoy supports very rich set of configurable parameters that dictate what type of requests are retried, how many times the request should be retried, timeouts for retries, etc. Retries in gRPC services @@ -37,24 +39,24 @@ For gRPC services, Envoy looks at the gRPC status in the response and attempts a The following application status codes in gRPC are considered safe for automatic retry. * *CANCELLED* - Return this code if there is an error that can be retried in the service. 
-* *RESOURCE_EXHAUSTED* - Return this code if some of the resources that service depends on are exhausted in that instance so that retrying +* *RESOURCE_EXHAUSTED* - Return this code if some of the resources that service depends on are exhausted in that instance so that retrying to another instance would help. Please note that for shared resource exhaustion, returning this will not help. Instead :ref:`rate limiting ` should be used to handle such cases. -The HTTP Status codes *502 (Bad Gateway)*, *503 (Service Unavailable)* and *504 (Gateway Timeout)* are all mapped to gRPC status code *UNAVAILABLE*. +The HTTP Status codes *502 (Bad Gateway)*, *503 (Service Unavailable)* and *504 (Gateway Timeout)* are all mapped to gRPC status code *UNAVAILABLE*. This can also be considered safe for automatic retry. The idempotency of a request is an important consideration when configuring retries. -Envoy also supports extensions to its retry policies. The :ref:`retry plugins ` +Envoy also supports extensions to its retry policies. The :ref:`retry plugins ` allow you to customize the Envoy retry implementation to your application. Outlier Detection ----------------- :ref:`Outlier detection ` is a way of dynamically detecting misbehaving hosts -in the upstream cluster. By detecting such hosts and ejecting them for a temporary period of time from the healthy -load balancing set, Envoy can increase the success rate of a cluster. Envoy supports configuring outlier detection +in the upstream cluster. By detecting such hosts and ejecting them for a temporary period of time from the healthy +load balancing set, Envoy can increase the success rate of a cluster. Envoy supports configuring outlier detection based on continuous *5xx*, continuous gateway failures and success rate. Envoy also allows you to configure the ejection period. @@ -63,8 +65,8 @@ Envoy also allows you to configure the ejection period. 
The following settings help to optimize some combination of: -* Maximum request success for common scenarios (i.e. rolling upgrade) -* Speed +* Maximum request success for common scenarios (i.e. rolling upgrade) +* Speed * Avoid cascading failures @@ -81,9 +83,9 @@ The following settings help to optimize some combination of: } For the purpose of this specific use case, the retry budget for upstream cluster should be configured to -enable and control concurrent retries. If the value configured is too low, some requests will not be retried, +enable and control concurrent retries. If the value configured is too low, some requests will not be retried, which can be measured via :ref:`upstream_rq_retry_overflow `. -If the value configured is too high, the service can be overwhelmed with retry requests. +If the value configured is too high, the service can be overwhelmed with retry requests. *Outlier Detection* @@ -97,9 +99,9 @@ If the value configured is too high, the service can be overwhelmed with retry r "consecutive_gateway_failure": 5, } -This setting enables outlier detection if there are 5 consecutive *5xx* or *gateway failures* -and limits the number of hosts that are ejected to 50% of the upstream cluster size. This configuration -places a safe limit on the number of hosts removed. Please note that once a host a ejected, it will be returned +This setting enables outlier detection if there are 5 consecutive *5xx* or *gateway failures* +and limits the number of hosts that are ejected to 50% of the upstream cluster size. This configuration +places a safe limit on the number of hosts removed. Please note that once a host a ejected, it will be returned to the pool after an ejection time is elapsed (which is equal to the *base_ejection_time* multiplied by the number of times the host has been ejected). @@ -118,7 +120,7 @@ of times the host has been ejected). 
"host_selection_retry_max_attempts": "5" } -The request will be retried based on the conditions documented in *retry_on*. This setting also configures Envoy to use +The request will be retried based on the conditions documented in *retry_on*. This setting also configures Envoy to use :ref:`Previous Host Retry Predicate ` that allows it to choose a different -host than the host where previous request has failed, because typically failures on that same host are likely to continue -for some time and immediate retry would have less chance of success. +host than the host where previous request has failed, because typically failures on that same host are likely to continue +for some time and immediate retry would have less chance of success. diff --git a/docs/root/intro/arch_overview/listeners/dns_filter.rst b/docs/root/intro/arch_overview/listeners/dns_filter.rst new file mode 100644 index 000000000000..f6090c577ff9 --- /dev/null +++ b/docs/root/intro/arch_overview/listeners/dns_filter.rst @@ -0,0 +1,5 @@ +DNS Filter +========== + +Envoy supports DNS responses via a :ref:`UDP listener DNS Filter +`. 
diff --git a/docs/root/intro/arch_overview/listeners/listeners_toc.rst b/docs/root/intro/arch_overview/listeners/listeners_toc.rst index 922cb3e72447..77c377c8cbe8 100644 --- a/docs/root/intro/arch_overview/listeners/listeners_toc.rst +++ b/docs/root/intro/arch_overview/listeners/listeners_toc.rst @@ -9,3 +9,4 @@ Listeners network_filters tcp_proxy udp_proxy + dns_filter diff --git a/docs/root/intro/arch_overview/observability/access_logging.rst b/docs/root/intro/arch_overview/observability/access_logging.rst index feb98cc701dd..fa41bd0d377b 100644 --- a/docs/root/intro/arch_overview/observability/access_logging.rst +++ b/docs/root/intro/arch_overview/observability/access_logging.rst @@ -4,13 +4,18 @@ Access logging ============== The :ref:`HTTP connection manager ` and -:ref:`tcp proxy ` supports extensible access logging with the following +:ref:`tcp proxy ` support extensible access logging with the following features: -* Any number of access logs per connection manager or tcp proxy. +* Any number of access logs per a connection stream. * Customizable access log filters that allow different types of requests and responses to be written to different access logs. +Downstream connection access logging can be enabled using :ref:`listener access +logs`. The listener access logs complement +HTTP request access logging and can be enabled separately and independently from +filter access logs. + .. _arch_overview_access_log_filters: Access log filters diff --git a/docs/root/intro/arch_overview/observability/tracing.rst b/docs/root/intro/arch_overview/observability/tracing.rst index effccb636b84..74657ff4ca99 100644 --- a/docs/root/intro/arch_overview/observability/tracing.rst +++ b/docs/root/intro/arch_overview/observability/tracing.rst @@ -11,7 +11,7 @@ sources of latency. 
Envoy supports three features related to system wide tracing * **Request ID generation**: Envoy will generate UUIDs when needed and populate the :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the - x-request-id header for unified logging as well as tracing. + x-request-id header for unified logging as well as tracing. The behavior can be configured on per :ref:`HTTP connection manager` basis using an extension. * **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can be used to join untrusted request IDs to the trusted internal :ref:`config_http_conn_man_headers_x-request-id`. diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index 76ac394b7564..ebcb2e8f6838 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -163,7 +163,9 @@ Only a single TLS certificate is supported today for :ref:`UpstreamTlsContexts Secret discovery service (SDS) ------------------------------ -TLS certificates can be specified in the static resource or can be fetched remotely. Please see :ref:`SDS ` for details. +TLS certificates can be specified in the static resource or can be fetched remotely. +Certificate rotation is supported for static resources by sourcing :ref:`SDS configuration from the filesystem ` or by pushing updates from the SDS server. +Please see :ref:`SDS ` for details. .. _arch_overview_ssl_auth_filter: diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst index 0127f44a9dab..1a52a216917f 100644 --- a/docs/root/intro/deprecated.rst +++ b/docs/root/intro/deprecated.rst @@ -10,8 +10,8 @@ The following features have been DEPRECATED and will be removed in the specified A logged warning is expected for each deprecated item that is in deprecation window. Deprecated items below are listed in chronological order. 
-1.14.0 (Pending) -================ +1.14.0 (April 8, 2020) +====================== * The previous behavior for upstream connection pool circuit breaking described `here `_ has been deprecated in favor of the new behavior described :ref:`here `. @@ -92,6 +92,17 @@ Deprecated items below are listed in chronological order. been deprecated in favor of `compressor`. * The statistics counter `header_gzip` in :ref:`HTTP Gzip filter ` has been deprecated in favor of `header_compressor_used`. +* Support for the undocumented HTTP/1.1 `:no-chunks` pseudo-header has been removed. If an extension + was using this it can achieve the same behavior via the new `http1StreamEncoderOptions()` API. +* The grpc_stats filter behavior of by default creating a new stat for every message type seen is deprecated. + The default will switch to only creating a fixed set of stats. The previous behavior can be enabled by enabling + :ref:`stats_for_all_methods `, + and the previous default can be enabled until the end of the deprecation period by enabling runtime feature + `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. +* The :ref:`source_ip ` field in + `RBAC `_ has been deprecated + in favor of :ref:`direct_remote_ip ` and + :ref:`remote_ip `. 1.13.0 (January 20, 2020) ========================= diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index afafc2ac3112..c67693bd7fe7 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -1,74 +1,111 @@ Version history --------------- -1.14.0 (Pending) -================ -* access loggers: access logger extensions use the "envoy.access_loggers" name space. A mapping +1.14.1 (April 8, 2020) +====================== +* request_id_extension: fixed static initialization for noop request id extension. + +1.14.0 (April 8, 2020) +====================== +* access log: access logger extensions use the "envoy.access_loggers" name space. 
A mapping of extension names is available in the :ref:`deprecated ` documentation. -* access log: fix %DOWSTREAM_DIRECT_REMOTE_ADDRESS% when used with PROXY protocol listener filter +* access log: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`access log formatters `. +* access log: fixed `%DOWSTREAM_DIRECT_REMOTE_ADDRESS%` when used with PROXY protocol listener filter. +* access log: introduced :ref:`connection-level access loggers`. * adaptive concurrency: fixed bug that allowed concurrency limits to drop below the configured minimum. -* aws_request_signing: a few fixes so that it works with S3. +* adaptive concurrency: minRTT is now triggered when the minimum concurrency is maintained for 5 + consecutive sampling intervals. * admin: added support for displaying ip address subject alternate names in :ref:`certs` end point. * admin: added :http:post:`/reopen_logs` endpoint to control log rotation. -* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. -* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. -* grpc-json: added support for building HTTP request into - `google.api.HttpBody `_. +* api: froze v2 xDS API. New feature development in the API should occur in v3 xDS. While the v2 xDS API has + been deprecated since 1.13.0, it will continue to be supported by Envoy until EOY 2020. See + :ref:`api_supported_versions`. +* aws_lambda: added :ref:`AWS Lambda filter ` that converts HTTP requests to Lambda + invokes. This effectively makes Envoy act as an egress gateway to AWS Lambda. +* aws_request_signing: a few fixes so that it works with S3. * config: added stat :ref:`update_time `. +* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. 
* datasource: added retry policy for remote async data source. -* dns: the STRICT_DNS cluster now only resolves to 0 hosts if DNS resolution successfully returns 0 hosts. * dns: added support for :ref:`dns_failure_refresh_rate ` for the :ref:`dns cache ` to set the DNS refresh rate during failures. -* http filters: http filter extensions use the "envoy.filters.http" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. +* dns: the STRICT_DNS cluster now only resolves to 0 hosts if DNS resolution successfully returns 0 hosts. +* eds: added :ref:`hostname ` field for endpoints and :ref:`hostname ` field for endpoint's health check config. This enables auto host rewrite and customizing the host header during health checks for eds endpoints. * ext_authz: disabled the use of lowercase string matcher for headers matching in HTTP-based `ext_authz`. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher` to false. * fault: added support for controlling abort faults with :ref:`HTTP header fault configuration ` to the HTTP fault filter. -* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection` -* http: fixing a bug in HTTP/1.0 responses where Connection: keep-alive was not appended for connections which were kept alive. -* http: fixed a bug that could send extra METADATA frames and underflow memory when encoding METADATA frames on a connection that was dispatching data. -* http: connection header sanitizing has been modified to always sanitize if there is no upgrade, including when an h2c upgrade attempt has been removed. +* grpc-json: added support for building HTTP request into + `google.api.HttpBody `_. +* grpc-stats: added option to limit which messages stats are created for. +* http: added HTTP/1.1 flood protection. 
Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`. +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. * http: added :ref:`max_stream_duration ` to specify the duration of existing streams. See :ref:`connection and stream timeouts `. +* http: connection header sanitizing has been modified to always sanitize if there is no upgrade, including when an h2c upgrade attempt has been removed. +* http: fixed a bug that could send extra METADATA frames and underflow memory when encoding METADATA frames on a connection that was dispatching data. +* http: fixing a bug in HTTP/1.0 responses where Connection: keep-alive was not appended for connections which were kept alive. +* http: http filter extensions use the "envoy.filters.http" name space. A mapping + of extension names is available in the :ref:`deprecated ` documentation. +* http: the runtime feature `http.connection_manager.log_flood_exception` is removed and replaced with a connection access log response code. +* http: upgrade parser library, which removes support for "identity" transfer-encoding value. * listener filters: listener filter extensions use the "envoy.filters.listener" name space. A mapping of extension names is available in the :ref:`deprecated ` documentation. -* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* listeners: added :ref:`listener filter matcher api ` to disable individual listener filter on matching downstream connections. +* loadbalancing: added support for using hostname for consistent hash loadbalancing via :ref:`consistent_hash_lb_config `. 
+* loadbalancing: added support for :ref:`retry host predicates ` in conjunction with consistent hashing load balancers (ring hash and maglev). * lua: added a parameter to `httpCall` that makes it possible to have the call be asynchronous. * lua: added moonjit support. * mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter` was modified to emit correctly for Mongo v3.2+. +* network filters: added a :ref:`direct response filter `. * network filters: network filter extensions use the "envoy.filters.network" name space. A mapping of extension names is available in the :ref:`deprecated ` documentation. -* network filters: added a :ref:`direct response filter `. -* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. -* retry: added a retry predicate that :ref:`rejects hosts based on metadata. ` +* rbac: added :ref:`remote_ip ` and :ref:`direct_remote_ip ` for matching downstream remote IP address. +* rbac: deprecated :ref:`source_ip ` with :ref:`direct_remote_ip ` and :ref:`remote_ip `. +* request_id_extension: added an ability to extend request ID handling at :ref:`HTTP connection manager`. +* retry: added a retry predicate that :ref:`rejects hosts based on metadata. `. +* router: added ability to set attempt count in downstream response, see :ref:`virtual host's include response + attempt count config `. +* router: added additional stats for :ref:`virtual clusters `. * router: added :ref:`auto_san_validation ` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header. * router: added the ability to match a route based on whether a downstream TLS connection certificate has been :ref:`validated `. * router: added support for :ref:`regex_rewrite ` for path rewriting using regular expressions and capture groups. +* router: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`header formatter `. 
* router: don't ignore :ref:`per_try_timeout ` when the :ref:`global route timeout ` is disabled. -* router: added ability to set attempt count in downstream response, see :ref:`virtual host's include response - attempt count config `. * router: strip whitespace for :ref:`retry_on `, :ref:`grpc-retry-on header ` and :ref:`retry-on header `. -* runtime: enabling the runtime feature "envoy.deprecated_features.allow_deprecated_extension_names" +* runtime: enabling the runtime feature `envoy.deprecated_features.allow_deprecated_extension_names` disables the use of deprecated extension names. * runtime: integer values may now be parsed as booleans. * sds: added :ref:`GenericSecret ` to support secret of generic type. -* sds: fix the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. +* sds: added :ref:`certificate rotation ` support for certificates in static resources. +* server: the SIGUSR1 access log reopen warning now is logged at info level. * stat sinks: stat sink extensions use the "envoy.stat_sinks" name space. A mapping of extension names is available in the :ref:`deprecated ` documentation. -* thrift_proxy: add router filter stats to docs. -* tracers: tracer extensions use the "envoy.tracers" name space. A mapping of extension names is - available in the :ref:`deprecated ` documentation. +* thrift_proxy: added router filter stats to docs. +* tls: added configuration to disable stateless TLS session resumption :ref:`disable_stateless_session_resumption `. * tracing: added gRPC service configuration to the OpenCensus Stackdriver and OpenCensus Agent tracers. +* tracing: tracer extensions use the "envoy.tracers" name space. A mapping of extension names is + available in the :ref:`deprecated ` documentation. +* upstream: added ``upstream_rq_retry_limit_exceeded`` to :ref:`cluster `, and :ref:`virtual cluster ` stats. 
+* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode`. * upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker limits for both requests and connections apply to both pool types. Also, HTTP/2 now has the option to limit concurrent requests on a connection, and allow multiple draining connections. The old behavior is deprecated, but can be used during the deprecation - period by disabling runtime feature "envoy.reloadable_features.new_http1_connection_pool_behavior" or - "envoy.reloadable_features.new_http2_connection_pool_behavior" and then re-configure your clusters or + period by disabling runtime feature `envoy.reloadable_features.new_http1_connection_pool_behavior` or + `envoy.reloadable_features.new_http2_connection_pool_behavior` and then re-configure your clusters or restart Envoy. The behavior will not switch until the connection pools are recreated. The new circuit breaker behavior is described :ref:`here `. -* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode`. +* zlib: by default zlib is initialized to use its default strategy (Z_DEFAULT_STRATEGY) + instead of the fixed one (Z_FIXED). The difference is that the use of dynamic + Huffman codes is enabled now resulting in better compression ratio for normal data. + +1.13.1 (March 3, 2020) +====================== +* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. +* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`. +* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. 
+* sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. 1.13.0 (January 20, 2020) ========================= @@ -133,6 +170,14 @@ Version history * tracing: added tags for gRPC request path, authority, content-type and timeout. * udp: added initial support for :ref:`UDP proxy ` +1.12.3 (March 3, 2020) +====================== +* buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. +* http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature `envoy.reloadable_features.http1_flood_protection`. +* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. +* sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. + 1.12.2 (December 10, 2019) ========================== * http: fixed CVE-2019-18801 by allocating sufficient memory for request headers. 
diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index d0f123a3b556..334fc8de6138 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -60,7 +60,6 @@ proto_library( "//envoy/config/filter/http/squash/v2:pkg", "//envoy/config/filter/http/tap/v2alpha:pkg", "//envoy/config/filter/http/transcoder/v2:pkg", - "//envoy/config/filter/http/wasm/v2:pkg", "//envoy/config/filter/listener/http_inspector/v2:pkg", "//envoy/config/filter/listener/original_dst/v2:pkg", "//envoy/config/filter/listener/original_src/v2alpha1:pkg", @@ -81,7 +80,6 @@ proto_library( "//envoy/config/filter/network/sni_cluster/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/filter/network/wasm/v2:pkg", "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index 5d0217bd2be6..c275a8c65835 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "db4b343e48c1264bb4d9ff491b059300701dc7c7" # Jan 24, 2020 -UDPA_SHA256 = "800624f44592a24898f133e39ae7fbb7a6c4b85bdddd448185fb7e277f097a56" +UDPA_GIT_SHA = "e8cd3a4bb307e2c810cffff99f93e96e6d7fee85" # Mar 27, 2020 +UDPA_SHA256 = "1fd7857cb61daee7726fca8f4d55e4923774a8d00a53007a4093830dc0482685" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git 
a/generated_api_shadow/envoy/admin/v2alpha/BUILD b/generated_api_shadow/envoy/admin/v2alpha/BUILD index a7253df510f8..1d38be06555c 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/BUILD +++ b/generated_api_shadow/envoy/admin/v2alpha/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/bootstrap/v2:pkg", "//envoy/service/tap/v2alpha:pkg", "//envoy/type:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/admin/v2alpha/certs.proto b/generated_api_shadow/envoy/admin/v2alpha/certs.proto index df84f0b5d7eb..c7b568ca1e58 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/certs.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/certs.proto @@ -4,9 +4,12 @@ package envoy.admin.v2alpha; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Certificates] diff --git a/generated_api_shadow/envoy/admin/v2alpha/clusters.proto b/generated_api_shadow/envoy/admin/v2alpha/clusters.proto index 4a05a40fe3ca..3b7ec029aa63 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/clusters.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/clusters.proto @@ -8,9 +8,12 @@ import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/health_check.proto"; import "envoy/type/percent.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ClustersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Clusters] diff --git a/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto index e5ddc6cce62a..833c015fb474 100644 --- 
a/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto @@ -7,9 +7,12 @@ import "envoy/config/bootstrap/v2/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ConfigDump] diff --git a/generated_api_shadow/envoy/admin/v2alpha/listeners.proto b/generated_api_shadow/envoy/admin/v2alpha/listeners.proto index 8fee45093065..ca7b736521d0 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/listeners.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/listeners.proto @@ -4,9 +4,12 @@ package envoy.admin.v2alpha; import "envoy/api/v2/core/address.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ListenersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listeners] diff --git a/generated_api_shadow/envoy/admin/v2alpha/memory.proto b/generated_api_shadow/envoy/admin/v2alpha/memory.proto index 1544cd111dfd..85fd2169d6d7 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/memory.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/memory.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.admin.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MemoryProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Memory] diff --git a/generated_api_shadow/envoy/admin/v2alpha/metrics.proto b/generated_api_shadow/envoy/admin/v2alpha/metrics.proto index 
79c15f72b2ec..15ad219c13e5 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/metrics.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/metrics.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.admin.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MetricsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics] diff --git a/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto b/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto index 1b725a11143a..22c65f3de5a6 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.admin.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MutexStatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: MutexStats] diff --git a/generated_api_shadow/envoy/admin/v2alpha/server_info.proto b/generated_api_shadow/envoy/admin/v2alpha/server_info.proto index 1052cb6296ee..b9db6bbc1e1f 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/server_info.proto @@ -5,10 +5,12 @@ package envoy.admin.v2alpha; import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ServerInfoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Server State] diff --git a/generated_api_shadow/envoy/admin/v2alpha/tap.proto 
b/generated_api_shadow/envoy/admin/v2alpha/tap.proto index d16ffdd711db..6335b4db6284 100644 --- a/generated_api_shadow/envoy/admin/v2alpha/tap.proto +++ b/generated_api_shadow/envoy/admin/v2alpha/tap.proto @@ -4,11 +4,13 @@ package envoy.admin.v2alpha; import "envoy/service/tap/v2alpha/common.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] diff --git a/generated_api_shadow/envoy/admin/v3/certs.proto b/generated_api_shadow/envoy/admin/v3/certs.proto index 1b28c2eb5e2c..158c8aead28f 100644 --- a/generated_api_shadow/envoy/admin/v3/certs.proto +++ b/generated_api_shadow/envoy/admin/v3/certs.proto @@ -4,11 +4,13 @@ package envoy.admin.v3; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Certificates] diff --git a/generated_api_shadow/envoy/admin/v3/clusters.proto b/generated_api_shadow/envoy/admin/v3/clusters.proto index 47f8df0852b1..fc05c8a10de2 100644 --- a/generated_api_shadow/envoy/admin/v3/clusters.proto +++ b/generated_api_shadow/envoy/admin/v3/clusters.proto @@ -8,11 +8,13 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/type/v3/percent.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ClustersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // 
[#protodoc-title: Clusters] diff --git a/generated_api_shadow/envoy/admin/v3/config_dump.proto b/generated_api_shadow/envoy/admin/v3/config_dump.proto index 6e2e5952b3e5..b3c3836a8cc0 100644 --- a/generated_api_shadow/envoy/admin/v3/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v3/config_dump.proto @@ -7,11 +7,13 @@ import "envoy/config/bootstrap/v3/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ConfigDump] diff --git a/generated_api_shadow/envoy/admin/v3/listeners.proto b/generated_api_shadow/envoy/admin/v3/listeners.proto index 690d1a4d27f5..6197a44e4243 100644 --- a/generated_api_shadow/envoy/admin/v3/listeners.proto +++ b/generated_api_shadow/envoy/admin/v3/listeners.proto @@ -4,11 +4,13 @@ package envoy.admin.v3; import "envoy/config/core/v3/address.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ListenersProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listeners] diff --git a/generated_api_shadow/envoy/admin/v3/memory.proto b/generated_api_shadow/envoy/admin/v3/memory.proto index 44ef011e4cf5..bcf9f271748d 100644 --- a/generated_api_shadow/envoy/admin/v3/memory.proto +++ b/generated_api_shadow/envoy/admin/v3/memory.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.admin.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "MemoryProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Memory] diff --git a/generated_api_shadow/envoy/admin/v3/metrics.proto b/generated_api_shadow/envoy/admin/v3/metrics.proto index 3a2bd8f27f12..71592ac1e9ec 100644 --- a/generated_api_shadow/envoy/admin/v3/metrics.proto +++ b/generated_api_shadow/envoy/admin/v3/metrics.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.admin.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "MetricsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics] diff --git a/generated_api_shadow/envoy/admin/v3/mutex_stats.proto b/generated_api_shadow/envoy/admin/v3/mutex_stats.proto index d0a2ca08efff..49965d87ae80 100644 --- a/generated_api_shadow/envoy/admin/v3/mutex_stats.proto +++ b/generated_api_shadow/envoy/admin/v3/mutex_stats.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.admin.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "MutexStatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: MutexStats] diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index c4541da1aa44..d412a7f011de 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -4,13 +4,14 @@ package envoy.admin.v3; import "google/protobuf/duration.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; 
option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ServerInfoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Server State] @@ -136,13 +137,6 @@ message CommandLineOptions { // See :option:`--mode` for details. Mode mode = 19; - // max_stats and max_obj_name_len are now unused and have no effect. - uint64 hidden_envoy_deprecated_max_stats = 20 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - uint64 hidden_envoy_deprecated_max_obj_name_len = 21 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // See :option:`--disable-hot-restart` for details. bool disable_hot_restart = 22; @@ -157,4 +151,10 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + uint64 hidden_envoy_deprecated_max_stats = 20 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + uint64 hidden_envoy_deprecated_max_obj_name_len = 21 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/admin/v3/tap.proto b/generated_api_shadow/envoy/admin/v3/tap.proto index 094753a6a490..ca7ab4405a9b 100644 --- a/generated_api_shadow/envoy/admin/v3/tap.proto +++ b/generated_api_shadow/envoy/admin/v3/tap.proto @@ -4,13 +4,14 @@ package envoy.admin.v3; import "envoy/config/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] diff --git a/generated_api_shadow/envoy/admin/v4alpha/BUILD b/generated_api_shadow/envoy/admin/v4alpha/BUILD new file mode 100644 index 
000000000000..6da5b60bad28 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/admin/v3:pkg", + "//envoy/annotations:pkg", + "//envoy/config/bootstrap/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/admin/v4alpha/certs.proto b/generated_api_shadow/envoy/admin/v4alpha/certs.proto new file mode 100644 index 000000000000..585b09bccf4c --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/certs.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "google/protobuf/timestamp.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "CertsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Certificates] + +// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to +// display certificate information. See :ref:`/certs ` for more +// information. +message Certificates { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificates"; + + // List of certificates known to an Envoy. + repeated Certificate certificates = 1; +} + +message Certificate { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificate"; + + // Details of CA certificate. 
+ repeated CertificateDetails ca_cert = 1; + + // Details of Certificate Chain + repeated CertificateDetails cert_chain = 2; +} + +// [#next-free-field: 7] +message CertificateDetails { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; + + // Path of the certificate. + string path = 1; + + // Certificate Serial Number. + string serial_number = 2; + + // List of Subject Alternate names. + repeated SubjectAlternateName subject_alt_names = 3; + + // Minimum of days until expiration of certificate and it's chain. + uint64 days_until_expiration = 4; + + // Indicates the time from which the certificate is valid. + google.protobuf.Timestamp valid_from = 5; + + // Indicates the time at which the certificate expires. + google.protobuf.Timestamp expiration_time = 6; +} + +message SubjectAlternateName { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.SubjectAlternateName"; + + // Subject Alternate Name. + oneof name { + string dns = 1; + + string uri = 2; + + string ip_address = 3; + } +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/clusters.proto b/generated_api_shadow/envoy/admin/v4alpha/clusters.proto new file mode 100644 index 000000000000..9056262cae86 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/clusters.proto @@ -0,0 +1,162 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/admin/v4alpha/metrics.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/type/v3/percent.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ClustersProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: 
Clusters] + +// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. +// See :ref:`/clusters ` for more information. +message Clusters { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Clusters"; + + // Mapping from cluster name to each cluster's status. + repeated ClusterStatus cluster_statuses = 1; +} + +// Details an individual cluster's current status. +// [#next-free-field: 6] +message ClusterStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; + + // Name of the cluster. + string name = 1; + + // Denotes whether this cluster was added via API or configured statically. + bool added_via_api = 2; + + // The success rate threshold used in the last interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors: externally and locally generated were used to calculate the threshold. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*, only externally generated errors were used to calculate the threshold. + // The threshold is used to eject hosts based on their success rate. See + // :ref:`Cluster outlier detection ` documentation for details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + type.v3.Percent success_rate_ejection_threshold = 3; + + // Mapping from host address to the host's current status. + repeated HostStatus host_statuses = 4; + + // The success rate threshold used in the last interval when only locally originated failures were + // taken into account and externally originated errors were treated as success. 
+ // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*. The threshold is used to eject hosts based on their success rate. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + type.v3.Percent local_origin_success_rate_ejection_threshold = 5; +} + +// Current state of a particular host. +// [#next-free-field: 10] +message HostStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostStatus"; + + // Address of this host. + config.core.v4alpha.Address address = 1; + + // List of stats specific to this host. + repeated SimpleMetric stats = 2; + + // The host's current health status. + HostHealthStatus health_status = 3; + + // Request success rate for this host over the last calculated interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *false*, all errors: externally and locally generated were used in success rate + // calculation. If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*, only externally generated errors were used in success rate calculation. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + type.v3.Percent success_rate = 4; + + // The host's weight. If not configured, the value defaults to 1. 
+ uint32 weight = 5; + + // The hostname of the host, if applicable. + string hostname = 6; + + // The host's priority. If not configured, the value defaults to 0 (highest priority). + uint32 priority = 7; + + // Request success rate for this host over the last calculated + // interval when only locally originated errors are taken into account and externally originated + // errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is *true*. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + type.v3.Percent local_origin_success_rate = 8; + + // locality of the host. + config.core.v4alpha.Locality locality = 9; +} + +// Health status for a host. +// [#next-free-field: 7] +message HostHealthStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostHealthStatus"; + + // The host is currently failing active health checks. + bool failed_active_health_check = 1; + + // The host is currently considered an outlier and has been ejected. + bool failed_outlier_check = 2; + + // The host is currently being marked as degraded through active health checking. + bool failed_active_degraded_check = 4; + + // The host has been removed from service discovery, but is being stabilized due to active + // health checking. + bool pending_dynamic_removal = 5; + + // The host has not yet been health checked. + bool pending_active_hc = 6; + + // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported + // here. + // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
+ config.core.v4alpha.HealthStatus eds_health_status = 3; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto new file mode 100644 index 000000000000..02709a414506 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto @@ -0,0 +1,342 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/config/bootstrap/v4alpha/bootstrap.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ConfigDumpProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: ConfigDump] + +// The :ref:`/config_dump ` admin endpoint uses this wrapper +// message to maintain and serve arbitrary configuration information from any component in Envoy. +message ConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ConfigDump"; + + // This list is serialized and dumped in its entirety at the + // :ref:`/config_dump ` endpoint. + // + // The following configurations are currently supported and will be dumped in the order given + // below: + // + // * *bootstrap*: :ref:`BootstrapConfigDump ` + // * *clusters*: :ref:`ClustersConfigDump ` + // * *listeners*: :ref:`ListenersConfigDump ` + // * *routes*: :ref:`RoutesConfigDump ` + // + // You can filter output with the resource and mask query parameters. + // See :ref:`/config_dump?resource={} `, + // :ref:`/config_dump?mask={} `, + // or :ref:`/config_dump?resource={},mask={} + // ` for more information. 
+ repeated google.protobuf.Any configs = 1; +} + +message UpdateFailureState { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UpdateFailureState"; + + // What the component configuration would have been if the update had succeeded. + google.protobuf.Any failed_configuration = 1; + + // Time of the latest failed update attempt. + google.protobuf.Timestamp last_update_attempt = 2; + + // Details about the last failed update attempt. + string details = 3; +} + +// This message describes the bootstrap configuration that Envoy was started with. This includes +// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate +// the static portions of an Envoy configuration by reusing the output as the bootstrap +// configuration for another Envoy. +message BootstrapConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.BootstrapConfigDump"; + + config.bootstrap.v4alpha.Bootstrap bootstrap = 1; + + // The timestamp when the BootstrapConfig was last updated. + google.protobuf.Timestamp last_updated = 2; +} + +// Envoy's listener manager fills this message with all currently known listeners. Listener +// configuration information can be used to recreate an Envoy configuration by populating all +// listeners as static listeners or by returning them in a LDS response. +message ListenersConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump"; + + // Describes a statically loaded listener. + message StaticListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ListenersConfigDump.StaticListener"; + + // The listener config. + google.protobuf.Any listener = 1; + + // The timestamp when the Listener was last successfully updated. 
+ google.protobuf.Timestamp last_updated = 2; + } + + message DynamicListenerState { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ListenersConfigDump.DynamicListenerState"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the listener was loaded. In the future, discrete per-listener versions may be supported + // by the API. + string version_info = 1; + + // The listener config. + google.protobuf.Any listener = 2; + + // The timestamp when the Listener was last successfully updated. + google.protobuf.Timestamp last_updated = 3; + } + + // Describes a dynamically loaded listener via the LDS API. + // [#next-free-field: 6] + message DynamicListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ListenersConfigDump.DynamicListener"; + + // The name or unique id of this listener, pulled from the DynamicListenerState config. + string name = 1; + + // The listener state for any active listener by this name. + // These are listeners that are available to service data plane traffic. + DynamicListenerState active_state = 2; + + // The listener state for any warming listener by this name. + // These are listeners that are currently undergoing warming in preparation to service data + // plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the warming listeners should generally be discarded. + DynamicListenerState warming_state = 3; + + // The listener state for any draining listener by this name. + // These are listeners that are currently undergoing draining in preparation to stop servicing + // data plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the draining listeners should generally be discarded. 
+ DynamicListenerState draining_state = 4; + + // Set if the last update failed, cleared after the next successful update. + UpdateFailureState error_state = 5; + } + + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + string version_info = 1; + + // The statically loaded listener configs. + repeated StaticListener static_listeners = 2; + + // State for any warming, active, or draining listeners. + repeated DynamicListener dynamic_listeners = 3; +} + +// Envoy's cluster manager fills this message with all currently known clusters. Cluster +// configuration information can be used to recreate an Envoy configuration by populating all +// clusters as static clusters or by returning them in a CDS response. +message ClustersConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump"; + + // Describes a statically loaded cluster. + message StaticCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ClustersConfigDump.StaticCluster"; + + // The cluster config. + google.protobuf.Any cluster = 1; + + // The timestamp when the Cluster was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + // Describes a dynamically loaded cluster via the CDS API. + message DynamicCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ClustersConfigDump.DynamicCluster"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + // the API. + string version_info = 1; + + // The cluster config. + google.protobuf.Any cluster = 2; + + // The timestamp when the Cluster was last updated. 
+ google.protobuf.Timestamp last_updated = 3; + } + + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + string version_info = 1; + + // The statically loaded cluster configs. + repeated StaticCluster static_clusters = 2; + + // The dynamically loaded active clusters. These are clusters that are available to service + // data plane traffic. + repeated DynamicCluster dynamic_active_clusters = 3; + + // The dynamically loaded warming clusters. These are clusters that are currently undergoing + // warming in preparation to service data plane traffic. Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming clusters should generally be + // discarded. + repeated DynamicCluster dynamic_warming_clusters = 4; +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration +// or defined inline while configuring listeners are separated from those configured dynamically via RDS. +// Route configuration information can be used to recreate an Envoy configuration by populating all routes +// as static routes or by returning them in RDS responses. +message RoutesConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump"; + + message StaticRouteConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.RoutesConfigDump.StaticRouteConfig"; + + // The route config. + google.protobuf.Any route_config = 1; + + // The timestamp when the Route was last updated. 
+ google.protobuf.Timestamp last_updated = 2; + } + + message DynamicRouteConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the route configuration was loaded. + string version_info = 1; + + // The route config. + google.protobuf.Any route_config = 2; + + // The timestamp when the Route was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded route configs. + repeated StaticRouteConfig static_route_configs = 2; + + // The dynamically loaded route configs. + repeated DynamicRouteConfig dynamic_route_configs = 3; +} + +// Envoy's scoped RDS implementation fills this message with all currently loaded route +// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both +// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the +// dynamically obtained scopes via the SRDS API. +message ScopedRoutesConfigDump { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ScopedRoutesConfigDump"; + + message InlineScopedRouteConfigs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; + + // The name assigned to the scoped route configurations. + string name = 1; + + // The scoped route configurations. + repeated google.protobuf.Any scoped_route_configs = 2; + + // The timestamp when the scoped route config set was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + message DynamicScopedRouteConfigs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; + + // The name assigned to the scoped route configurations. 
+ string name = 1; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the scoped routes configuration was loaded. + string version_info = 2; + + // The scoped route configurations. + repeated google.protobuf.Any scoped_route_configs = 3; + + // The timestamp when the scoped route config set was last updated. + google.protobuf.Timestamp last_updated = 4; + } + + // The statically loaded scoped route configs. + repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; + + // The dynamically loaded scoped route configs. + repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; +} + +// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. +message SecretsConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump"; + + // DynamicSecret contains secret information fetched via SDS. + message DynamicSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.SecretsConfigDump.DynamicSecret"; + + // The name assigned to the secret. + string name = 1; + + // This is the per-resource version information. + string version_info = 2; + + // The timestamp when the secret was last updated. + google.protobuf.Timestamp last_updated = 3; + + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + google.protobuf.Any secret = 4; + } + + // StaticSecret specifies statically loaded secret in bootstrap. + message StaticSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.SecretsConfigDump.StaticSecret"; + + // The name assigned to the secret. + string name = 1; + + // The timestamp when the secret was last updated. + google.protobuf.Timestamp last_updated = 2; + + // The actual secret information. 
+ // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + google.protobuf.Any secret = 3; + } + + // The statically loaded secrets. + repeated StaticSecret static_secrets = 1; + + // The dynamically loaded active secrets. These are secrets that are available to service + // clusters or listeners. + repeated DynamicSecret dynamic_active_secrets = 2; + + // The dynamically loaded warming secrets. These are secrets that are currently undergoing + // warming in preparation to service clusters or listeners. + repeated DynamicSecret dynamic_warming_secrets = 3; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/listeners.proto b/generated_api_shadow/envoy/admin/v4alpha/listeners.proto new file mode 100644 index 000000000000..89bdc4c5bbf8 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/listeners.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ListenersProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listeners] + +// Admin endpoint uses this wrapper for `/listeners` to display listener status information. +// See :ref:`/listeners ` for more information. +message Listeners { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Listeners"; + + // List of listener statuses. + repeated ListenerStatus listener_statuses = 1; +} + +// Details an individual listener's current status. 
+message ListenerStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenerStatus"; + + // Name of the listener + string name = 1; + + // The actual local address that the listener is listening on. If a listener was configured + // to listen on port 0, then this address has the port that was allocated by the OS. + config.core.v4alpha.Address local_address = 2; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/memory.proto b/generated_api_shadow/envoy/admin/v4alpha/memory.proto new file mode 100644 index 000000000000..d2f0b57229ce --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/memory.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "MemoryProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Memory] + +// Proto representation of the internal memory consumption of an Envoy instance. These represent +// values extracted from an internal TCMalloc instance. For more information, see the section of the +// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). +// [#next-free-field: 7] +message Memory { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Memory"; + + // The number of bytes allocated by the heap for Envoy. This is an alias for + // `generic.current_allocated_bytes`. + uint64 allocated = 1; + + // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for + // `generic.heap_size`. + uint64 heap_size = 2; + + // The number of bytes in free, unmapped pages in the page heap. 
These bytes always count towards + // virtual memory usage, and depending on the OS, typically do not count towards physical memory + // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. + uint64 pageheap_unmapped = 3; + + // The number of bytes in free, mapped pages in the page heap. These bytes always count towards + // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also + // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. + uint64 pageheap_free = 4; + + // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias + // for `tcmalloc.current_total_thread_cache_bytes`. + uint64 total_thread_cache = 5; + + // The number of bytes of the physical memory usage by the allocator. This is an alias for + // `generic.total_physical_bytes`. + uint64 total_physical_bytes = 6; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/metrics.proto b/generated_api_shadow/envoy/admin/v4alpha/metrics.proto new file mode 100644 index 000000000000..78613320038b --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/metrics.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "MetricsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metrics] + +// Proto representation of an Envoy Counter or Gauge value. +message SimpleMetric { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SimpleMetric"; + + enum Type { + COUNTER = 0; + GAUGE = 1; + } + + // Type of the metric represented. + Type type = 1; + + // Current metric value. + uint64 value = 2; + + // Name of the metric. 
+ string name = 3; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto b/generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto new file mode 100644 index 000000000000..6f9fcd548cc0 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "MutexStatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: MutexStats] + +// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run +// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` +// [docs](https://abseil.io/about/design/mutex#extra-features). +// +// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not +// correspond to core clock frequency. For more information, see the `CycleClock` +// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). +message MutexStats { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.MutexStats"; + + // The number of individual mutex contentions which have occurred since startup. + uint64 num_contentions = 1; + + // The length of the current contention wait cycle. + uint64 current_wait_cycles = 2; + + // The lifetime total of all contention wait cycles. 
+ uint64 lifetime_wait_cycles = 3; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto new file mode 100644 index 000000000000..867a9255bc51 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -0,0 +1,155 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "google/protobuf/duration.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "ServerInfoProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Server State] + +// Proto representation of the value returned by /server_info, containing +// server version/server status information. +// [#next-free-field: 7] +message ServerInfo { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo"; + + enum State { + // Server is live and serving traffic. + LIVE = 0; + + // Server is draining listeners in response to external health checks failing. + DRAINING = 1; + + // Server has not yet completed cluster manager initialization. + PRE_INITIALIZING = 2; + + // Server is running the cluster manager initialization callbacks (e.g., RDS). + INITIALIZING = 3; + } + + // Server version. + string version = 1; + + // State of the server. + State state = 2; + + // Uptime since current epoch was started. + google.protobuf.Duration uptime_current_epoch = 3; + + // Uptime since the start of the first epoch. + google.protobuf.Duration uptime_all_epochs = 4; + + // Hot restart version. + string hot_restart_version = 5; + + // Command line options the server is currently running with. 
+ CommandLineOptions command_line_options = 6; +} + +// [#next-free-field: 29] +message CommandLineOptions { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; + + enum IpVersion { + v4 = 0; + v6 = 1; + } + + enum Mode { + // Validate configs and then serve traffic normally. + Serve = 0; + + // Validate configs and exit. + Validate = 1; + + // Completely load and initialize the config, and then exit without running the listener loop. + InitOnly = 2; + } + + reserved 12, 20, 21; + + reserved "max_stats", "max_obj_name_len"; + + // See :option:`--base-id` for details. + uint64 base_id = 1; + + // See :option:`--concurrency` for details. + uint32 concurrency = 2; + + // See :option:`--config-path` for details. + string config_path = 3; + + // See :option:`--config-yaml` for details. + string config_yaml = 4; + + // See :option:`--allow-unknown-static-fields` for details. + bool allow_unknown_static_fields = 5; + + // See :option:`--reject-unknown-dynamic-fields` for details. + bool reject_unknown_dynamic_fields = 26; + + // See :option:`--admin-address-path` for details. + string admin_address_path = 6; + + // See :option:`--local-address-ip-version` for details. + IpVersion local_address_ip_version = 7; + + // See :option:`--log-level` for details. + string log_level = 8; + + // See :option:`--component-log-level` for details. + string component_log_level = 9; + + // See :option:`--log-format` for details. + string log_format = 10; + + // See :option:`--log-format-escaped` for details. + bool log_format_escaped = 27; + + // See :option:`--log-path` for details. + string log_path = 11; + + // See :option:`--service-cluster` for details. + string service_cluster = 13; + + // See :option:`--service-node` for details. + string service_node = 14; + + // See :option:`--service-zone` for details. + string service_zone = 15; + + // See :option:`--file-flush-interval-msec` for details. 
+ google.protobuf.Duration file_flush_interval = 16; + + // See :option:`--drain-time-s` for details. + google.protobuf.Duration drain_time = 17; + + // See :option:`--parent-shutdown-time-s` for details. + google.protobuf.Duration parent_shutdown_time = 18; + + // See :option:`--mode` for details. + Mode mode = 19; + + // See :option:`--disable-hot-restart` for details. + bool disable_hot_restart = 22; + + // See :option:`--enable-mutex-tracing` for details. + bool enable_mutex_tracing = 23; + + // See :option:`--restart-epoch` for details. + uint32 restart_epoch = 24; + + // See :option:`--cpuset-threads` for details. + bool cpuset_threads = 25; + + // See :option:`--disable-extensions` for details. + repeated string disabled_extensions = 28; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/tap.proto b/generated_api_shadow/envoy/admin/v4alpha/tap.proto new file mode 100644 index 000000000000..c47b308d6ee6 --- /dev/null +++ b/generated_api_shadow/envoy/admin/v4alpha/tap.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.admin.v4alpha; + +import "envoy/config/tap/v3/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.admin.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap] + +// The /tap admin request body that is used to configure an active tap session. +message TapRequest { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.TapRequest"; + + // The opaque configuration ID used to match the configuration to a loaded extension. + // A tap extension configures a similar opaque ID that is used to match. + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The tap configuration to load. 
+ config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/api/v2/BUILD b/generated_api_shadow/envoy/api/v2/BUILD index cc6f9e77c192..46f8d16dfbd7 100644 --- a/generated_api_shadow/envoy/api/v2/BUILD +++ b/generated_api_shadow/envoy/api/v2/BUILD @@ -14,6 +14,7 @@ api_proto_package( "//envoy/api/v2/endpoint:pkg", "//envoy/api/v2/listener:pkg", "//envoy/api/v2/route:pkg", + "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/listener/v2:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/api/v2/auth/cert.proto b/generated_api_shadow/envoy/api/v2/auth/cert.proto index cdb6a3d168b0..a1642318e043 100644 --- a/generated_api_shadow/envoy/api/v2/auth/cert.proto +++ b/generated_api_shadow/envoy/api/v2/auth/cert.proto @@ -11,9 +11,9 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/sensitive.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; @@ -21,6 +21,7 @@ option java_outer_classname = "CertProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common TLS configuration] @@ -414,7 +415,7 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DownstreamTlsContext { // Common TLS context settings. CommonTlsContext common_tls_context = 1; @@ -433,6 +434,16 @@ message DownstreamTlsContext { // Config for fetching TLS session ticket keys via SDS API. 
SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; } // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session diff --git a/generated_api_shadow/envoy/api/v2/cds.proto b/generated_api_shadow/envoy/api/v2/cds.proto index dcd5c3fd0fb0..0b657a0fa452 100644 --- a/generated_api_shadow/envoy/api/v2/cds.proto +++ b/generated_api_shadow/envoy/api/v2/cds.proto @@ -8,6 +8,7 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import public "envoy/api/v2/cluster.proto"; @@ -16,6 +17,7 @@ option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: CDS] diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto index 55324ff60060..5de5c20df570 100644 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ b/generated_api_shadow/envoy/api/v2/cluster.proto @@ -21,12 +21,14 @@ import 
"google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Cluster configuration] @@ -354,7 +356,7 @@ message Cluster { } // Common configuration for all load balancer implementations. - // [#next-free-field: 7] + // [#next-free-field: 8] message CommonLbConfig { // Configuration for :ref:`zone aware routing // `. @@ -384,6 +386,13 @@ message Cluster { message LocalityWeightedLbConfig { } + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + bool use_hostname_for_hashing = 1; + } + // Configures the :ref:`healthy panic threshold `. // If not specified, the default is 50%. // To disable panic mode, set to 0%. @@ -438,6 +447,9 @@ message Cluster { // If set to `true`, the cluster manager will drain all existing // connections to upstream hosts whenever hosts are added or removed from the cluster. bool close_connections_on_host_set_change = 6; + + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) 
+ ConsistentHashingLbConfig consistent_hashing_lb_config = 7; } message RefreshRate { diff --git a/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto b/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto index 893d1f1aa168..510619b26429 100644 --- a/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto +++ b/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto @@ -8,6 +8,7 @@ import "envoy/type/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; @@ -16,6 +17,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Circuit breakers] diff --git a/generated_api_shadow/envoy/api/v2/cluster/filter.proto b/generated_api_shadow/envoy/api/v2/cluster/filter.proto index 67f3c3ba5e09..b87ad79d8f35 100644 --- a/generated_api_shadow/envoy/api/v2/cluster/filter.proto +++ b/generated_api_shadow/envoy/api/v2/cluster/filter.proto @@ -5,6 +5,7 @@ package envoy.api.v2.cluster; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; @@ -13,6 +14,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Upstream filters] // Upstream filters apply to the connections to the upstream cluster hosts. 
diff --git a/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto b/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto index 0cc638ceb493..6cf35e41ff15 100644 --- a/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto +++ b/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto @@ -6,6 +6,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; @@ -14,6 +15,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Outlier detection] diff --git a/generated_api_shadow/envoy/api/v2/core/address.proto b/generated_api_shadow/envoy/api/v2/core/address.proto index e5c1f1c9d48e..804da539583b 100644 --- a/generated_api_shadow/envoy/api/v2/core/address.proto +++ b/generated_api_shadow/envoy/api/v2/core/address.proto @@ -7,12 +7,14 @@ import "envoy/api/v2/core/socket_option.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "AddressProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Network addresses] diff --git a/generated_api_shadow/envoy/api/v2/core/backoff.proto b/generated_api_shadow/envoy/api/v2/core/backoff.proto index b46791500297..e45c71e39be8 100644 --- 
a/generated_api_shadow/envoy/api/v2/core/backoff.proto +++ b/generated_api_shadow/envoy/api/v2/core/backoff.proto @@ -5,12 +5,14 @@ package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "BackoffProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Backoff Strategy] diff --git a/generated_api_shadow/envoy/api/v2/core/base.proto b/generated_api_shadow/envoy/api/v2/core/base.proto index d10163b3bdf6..b7145d77efd3 100644 --- a/generated_api_shadow/envoy/api/v2/core/base.proto +++ b/generated_api_shadow/envoy/api/v2/core/base.proto @@ -14,6 +14,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/core/socket_option.proto"; @@ -22,6 +23,7 @@ option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "BaseProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common types] @@ -230,6 +232,15 @@ message RuntimeUInt32 { string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } +// Runtime derived double with a default when not specified. +message RuntimeDouble { + // Default value if runtime value is not available. + double default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. 
+ string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + // Runtime derived bool with a default when not specified. message RuntimeFeatureFlag { // Default value if runtime value is not available. diff --git a/generated_api_shadow/envoy/api/v2/core/config_source.proto b/generated_api_shadow/envoy/api/v2/core/config_source.proto index 60949ca1c8e5..fa42a7aeec1c 100644 --- a/generated_api_shadow/envoy/api/v2/core/config_source.proto +++ b/generated_api_shadow/envoy/api/v2/core/config_source.proto @@ -9,12 +9,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "ConfigSourceProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Configuration sources] @@ -133,6 +135,8 @@ message ConfigSource { option (validate.required) = true; // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. // // .. 
note:: // diff --git a/generated_api_shadow/envoy/api/v2/core/event_service_config.proto b/generated_api_shadow/envoy/api/v2/core/event_service_config.proto new file mode 100644 index 000000000000..f822f8c6b630 --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/core/event_service_config.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +message EventServiceConfig { + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. 
+ GrpcService grpc_service = 1; + } +} diff --git a/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto b/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto new file mode 100644 index 000000000000..3d646484b359 --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. +message GrpcMethodList { + message Service { + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The names of the gRPC methods in this service. 
+ repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/generated_api_shadow/envoy/api/v2/core/grpc_service.proto b/generated_api_shadow/envoy/api/v2/core/grpc_service.proto index 6fda81e3a209..dd789644e1d7 100644 --- a/generated_api_shadow/envoy/api/v2/core/grpc_service.proto +++ b/generated_api_shadow/envoy/api/v2/core/grpc_service.proto @@ -9,15 +9,16 @@ import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; -import "udpa/annotations/sensitive.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "GrpcServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC services] diff --git a/generated_api_shadow/envoy/api/v2/core/health_check.proto b/generated_api_shadow/envoy/api/v2/core/health_check.proto index 91aeb76b8b42..bc4ae3e5c866 100644 --- a/generated_api_shadow/envoy/api/v2/core/health_check.proto +++ b/generated_api_shadow/envoy/api/v2/core/health_check.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/event_service_config.proto"; import "envoy/type/http.proto"; import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; @@ -14,12 +15,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "HealthCheckProto"; option 
java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check] // * Health checking :ref:`architecture overview `. @@ -52,7 +55,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 22] +// [#next-free-field: 23] message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { @@ -71,7 +74,8 @@ message HealthCheck { message HttpHealthCheck { // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. string host = 1; // Specifies the HTTP path that will be requested during health checking. For example @@ -158,7 +162,8 @@ message HealthCheck { // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. string authority = 2; } @@ -288,6 +293,11 @@ message HealthCheck { // If empty, no event log will be written. string event_log_path = 17; + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; + // If set to true, health check failure events will always be logged. If set to false, only the // initial health check failure event will be logged. // The default value is false. 
diff --git a/generated_api_shadow/envoy/api/v2/core/http_uri.proto b/generated_api_shadow/envoy/api/v2/core/http_uri.proto index 5f740695dd80..cd1a0660e330 100644 --- a/generated_api_shadow/envoy/api/v2/core/http_uri.proto +++ b/generated_api_shadow/envoy/api/v2/core/http_uri.proto @@ -5,12 +5,14 @@ package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Service URI ] diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto index c8cfcf8260ac..5838ca744075 100644 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ b/generated_api_shadow/envoy/api/v2/core/protocol.proto @@ -6,12 +6,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Protocol options] @@ -32,9 +34,30 @@ message UpstreamHttpProtocolOptions { bool auto_san_validation = 2; } +// [#next-free-field: 6] message HttpProtocolOptions { + // Action to take when Envoy receives client request with header names containing underscore + // characters. 
+ // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. If not set, there is no idle timeout. When the + // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout @@ -65,6 +88,11 @@ message HttpProtocolOptions { // The current implementation implements this timeout on downstream connections only. // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. 
+ HeadersWithUnderscoresAction headers_with_underscores_action = 5; } // [#next-free-field: 6] diff --git a/generated_api_shadow/envoy/api/v2/core/socket_option.proto b/generated_api_shadow/envoy/api/v2/core/socket_option.proto index 9a044d1a9eb9..39678ad1b8bc 100644 --- a/generated_api_shadow/envoy/api/v2/core/socket_option.proto +++ b/generated_api_shadow/envoy/api/v2/core/socket_option.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.api.v2.core; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "SocketOptionProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Socket Option ] diff --git a/generated_api_shadow/envoy/api/v2/discovery.proto b/generated_api_shadow/envoy/api/v2/discovery.proto index 0794f82aa9d2..da2690f867fc 100644 --- a/generated_api_shadow/envoy/api/v2/discovery.proto +++ b/generated_api_shadow/envoy/api/v2/discovery.proto @@ -8,11 +8,13 @@ import "google/protobuf/any.proto"; import "google/rpc/status.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.discovery.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common discovery API components] diff --git a/generated_api_shadow/envoy/api/v2/eds.proto b/generated_api_shadow/envoy/api/v2/eds.proto index 0917940aee84..b0d5c7c47370 100644 --- a/generated_api_shadow/envoy/api/v2/eds.proto +++ b/generated_api_shadow/envoy/api/v2/eds.proto @@ -10,6 +10,7 @@ import "google/protobuf/wrappers.proto"; 
import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/endpoint.proto"; @@ -19,6 +20,7 @@ option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/api/v2/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint.proto index 87d8713e8e1f..e233b0e7d34e 100644 --- a/generated_api_shadow/envoy/api/v2/endpoint.proto +++ b/generated_api_shadow/envoy/api/v2/endpoint.proto @@ -10,12 +10,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Endpoint configuration] // Endpoint discovery :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto index 247c9ae265a5..9724fd72818d 100644 --- a/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto +++ b/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.api.v2.endpoint; +import "udpa/annotations/status.proto"; + import public "envoy/api/v2/endpoint/endpoint_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; diff --git 
a/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto b/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto index 5d2fe527588b..d7f209311697 100644 --- a/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto +++ b/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto @@ -9,12 +9,14 @@ import "envoy/api/v2/core/health_check.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "EndpointComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Endpoints] @@ -29,6 +31,13 @@ message Endpoint { // check port. Setting this with a non-zero value allows an upstream host // to have different health check address port. uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + string hostname = 2; } // The upstream host address. @@ -50,6 +59,12 @@ message Endpoint { // This takes into effect only for upstream clusters with // :ref:`active health checking ` enabled. HealthCheckConfig health_check_config = 2; + + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. + string hostname = 3; } // An Endpoint that Envoy can route traffic to. 
diff --git a/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto b/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto index a80d5b77d929..928aed6102df 100644 --- a/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto +++ b/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto @@ -9,12 +9,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // These are stats Envoy reports to GLB every so often. Report frequency is // defined by diff --git a/generated_api_shadow/envoy/api/v2/lds.proto b/generated_api_shadow/envoy/api/v2/lds.proto index aa13453ebc68..d1e528f2472d 100644 --- a/generated_api_shadow/envoy/api/v2/lds.proto +++ b/generated_api_shadow/envoy/api/v2/lds.proto @@ -10,6 +10,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/listener.proto"; @@ -19,6 +20,7 @@ option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener] // Listener :ref:`configuration overview ` diff --git a/generated_api_shadow/envoy/api/v2/listener.proto b/generated_api_shadow/envoy/api/v2/listener.proto index 5873380db801..a93df10c3128 100644 --- a/generated_api_shadow/envoy/api/v2/listener.proto +++ 
b/generated_api_shadow/envoy/api/v2/listener.proto @@ -7,6 +7,7 @@ import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/socket_option.proto"; import "envoy/api/v2/listener/listener_components.proto"; import "envoy/api/v2/listener/udp_listener_config.proto"; +import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/config/listener/v2/api_listener.proto"; import "google/api/annotations.proto"; @@ -14,17 +15,19 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` -// [#next-free-field: 22] +// [#next-free-field: 23] message Listener { enum DrainType { // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check @@ -237,4 +240,8 @@ message Listener { // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. 
+ repeated config.filter.accesslog.v2.AccessLog access_log = 22; } diff --git a/generated_api_shadow/envoy/api/v2/listener/listener.proto b/generated_api_shadow/envoy/api/v2/listener/listener.proto index 273b29cb5dd3..671da24b0445 100644 --- a/generated_api_shadow/envoy/api/v2/listener/listener.proto +++ b/generated_api_shadow/envoy/api/v2/listener/listener.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.api.v2.listener; +import "udpa/annotations/status.proto"; + import public "envoy/api/v2/listener/listener_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; diff --git a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto index ec889d7f4f46..fe449c63358a 100644 --- a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto +++ b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto @@ -12,6 +12,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; @@ -20,6 +21,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener components] // Listener :ref:`configuration overview ` @@ -207,9 +209,32 @@ message FilterChain { string name = 7; } -// [#not-implemented-hide:] // Listener filter chain match configuration. This is a recursive structure which allows complex // nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. 
code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// // [#next-free-field: 6] message ListenerFilterChainMatchPredicate { // A set of match configurations used for logical operations. @@ -255,17 +280,8 @@ message ListenerFilter { google.protobuf.Any typed_config = 3; } - // [#not-implemented-hide:] - // Decide when to disable this listener filter on incoming traffic. - // Example: - // 0. always enable filter - // don't set `filter_disabled` - // 1. disable when the destination port is 3306 - // rule.destination_port_range = Int32Range {start = 3306, end = 3307} - // 2. disable when the destination port is 3306 or 15000 - // rule.or_match = MatchSet.rules [ - // rule.destination_port_range = Int32Range {start = 3306, end = 3307}, - // rule.destination_port_range = Int32Range {start = 15000, end = 15001}, - // ] + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. 
ListenerFilterChainMatchPredicate filter_disabled = 4; } diff --git a/generated_api_shadow/envoy/api/v2/listener/quic_config.proto b/generated_api_shadow/envoy/api/v2/listener/quic_config.proto index 69069f76b7e0..2a4616bb09c9 100644 --- a/generated_api_shadow/envoy/api/v2/listener/quic_config.proto +++ b/generated_api_shadow/envoy/api/v2/listener/quic_config.proto @@ -6,6 +6,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option java_outer_classname = "QuicConfigProto"; @@ -13,6 +14,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: QUIC listener Config] diff --git a/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto b/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto index 31404b41d530..d4d29531f3aa 100644 --- a/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto +++ b/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto @@ -6,6 +6,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option java_outer_classname = "UdpListenerConfigProto"; @@ -13,6 +14,7 @@ option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP Listener Config] 
// Listener :ref:`configuration overview ` diff --git a/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto b/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto index 25fb2f2b0cbf..5ac72c69a6fb 100644 --- a/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto +++ b/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.api.v2.ratelimit; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.ratelimit"; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common rate limit components] diff --git a/generated_api_shadow/envoy/api/v2/rds.proto b/generated_api_shadow/envoy/api/v2/rds.proto index f54308aafb55..fad73f175840 100644 --- a/generated_api_shadow/envoy/api/v2/rds.proto +++ b/generated_api_shadow/envoy/api/v2/rds.proto @@ -9,6 +9,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/route.proto"; @@ -18,6 +19,7 @@ option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RDS] diff --git a/generated_api_shadow/envoy/api/v2/route.proto b/generated_api_shadow/envoy/api/v2/route.proto index 87374611d808..549f134a7f43 100644 --- a/generated_api_shadow/envoy/api/v2/route.proto +++ b/generated_api_shadow/envoy/api/v2/route.proto @@ -9,12 +9,14 @@ import 
"envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP route configuration] // * Routing :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/api/v2/route/route.proto b/generated_api_shadow/envoy/api/v2/route/route.proto index ec13e9e5c801..92e44f1e19df 100644 --- a/generated_api_shadow/envoy/api/v2/route/route.proto +++ b/generated_api_shadow/envoy/api/v2/route/route.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.api.v2.route; +import "udpa/annotations/status.proto"; + import public "envoy/api/v2/route/route_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.route"; diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto index 2ae4ee75ef30..c890134414e5 100644 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ b/generated_api_shadow/envoy/api/v2/route/route_components.proto @@ -16,12 +16,14 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.route"; option java_outer_classname = "RouteComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP route components] // * Routing :ref:`architecture 
overview ` @@ -1274,7 +1276,7 @@ message Tracing { // statistics are perfect in the sense that they are emitted on the downstream // side such that they include network level failures. // -// Documentation for :ref:`virtual cluster statistics `. +// Documentation for :ref:`virtual cluster statistics `. // // .. note:: // diff --git a/generated_api_shadow/envoy/api/v2/scoped_route.proto b/generated_api_shadow/envoy/api/v2/scoped_route.proto index 43f81cf92027..0841bd08723c 100644 --- a/generated_api_shadow/envoy/api/v2/scoped_route.proto +++ b/generated_api_shadow/envoy/api/v2/scoped_route.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.api.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ScopedRouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/api/v2/srds.proto b/generated_api_shadow/envoy/api/v2/srds.proto index f874307d7e1e..0edb99a1eccb 100644 --- a/generated_api_shadow/envoy/api/v2/srds.proto +++ b/generated_api_shadow/envoy/api/v2/srds.proto @@ -8,6 +8,7 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import public "envoy/api/v2/scoped_route.proto"; @@ -16,6 +17,7 @@ option java_outer_classname = "SrdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: SRDS] // * Routing :ref:`architecture overview 
` diff --git a/generated_api_shadow/envoy/config/accesslog/v2/als.proto b/generated_api_shadow/envoy/config/accesslog/v2/als.proto index 2486ffb81ed7..5b4106af106e 100644 --- a/generated_api_shadow/envoy/config/accesslog/v2/als.proto +++ b/generated_api_shadow/envoy/config/accesslog/v2/als.proto @@ -8,12 +8,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.grpc.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/generated_api_shadow/envoy/config/accesslog/v2/file.proto b/generated_api_shadow/envoy/config/accesslog/v2/file.proto index 395c396d7033..9b8671c81358 100644 --- a/generated_api_shadow/envoy/config/accesslog/v2/file.proto +++ b/generated_api_shadow/envoy/config/accesslog/v2/file.proto @@ -5,12 +5,14 @@ package envoy.config.accesslog.v2; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "FileProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.file.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: File access log] // [#extension: envoy.access_loggers.file] diff --git a/generated_api_shadow/envoy/config/accesslog/v2/wasm.proto b/generated_api_shadow/envoy/config/accesslog/v2/wasm.proto index a7b4e2143999..3ece08a90fb0 100644 --- 
a/generated_api_shadow/envoy/config/accesslog/v2/wasm.proto +++ b/generated_api_shadow/envoy/config/accesslog/v2/wasm.proto @@ -7,12 +7,14 @@ import "envoy/config/wasm/v2/wasm.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.wasm.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Wasm access log] diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index 42398bfbbd38..da29f198802f 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -9,13 +9,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v3"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common access log types] @@ -44,9 +45,9 @@ message AccessLog { // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig // ` oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 3 [deprecated = true]; - google.protobuf.Any typed_config = 4; + + google.protobuf.Struct hidden_envoy_deprecated_config = 3 [deprecated = true]; } } @@ -287,8 +288,8 @@ message ExtensionFilter { // Custom configuration that depends on the filter being instantiated. 
oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/BUILD b/generated_api_shadow/envoy/config/bootstrap/v2/BUILD index ca88c778827b..f15f3d64622f 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v2/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v2/BUILD @@ -14,5 +14,6 @@ api_proto_package( "//envoy/config/overload/v2alpha:pkg", "//envoy/config/trace/v2:pkg", "//envoy/config/wasm/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto index 7b6244d693b3..711e846a7dfc 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto @@ -7,6 +7,7 @@ import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/core/event_service_config.proto"; import "envoy/api/v2/core/socket_option.proto"; import "envoy/api/v2/listener.proto"; import "envoy/config/metrics/v2/stats.proto"; @@ -19,11 +20,13 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root @@ -196,6 +199,11 @@ message ClusterManager { message 
OutlierDetection { // Specifies the path to the outlier event log. string event_log_path = 1; + + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + api.v2.core.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD index cf8f11c4fdf6..3d63b7782529 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD @@ -14,8 +14,8 @@ api_proto_package( "//envoy/config/metrics/v3:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", + "//envoy/config/wasm/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/extensions/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index fdfe4fd5fdda..06e52252c4ec 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -6,26 +6,28 @@ import "envoy/config/cluster/v3/cluster.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/event_service_config.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/trace.proto"; +import "envoy/config/wasm/v3/wasm.proto"; import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; -import "envoy/extensions/wasm/v3/wasm.proto"; import "google/protobuf/duration.proto"; 
import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v3"; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root @@ -130,12 +132,6 @@ message Bootstrap { // tracing will be performed. trace.v3.Tracing tracing = 9; - // Configuration for the runtime configuration provider (deprecated). If not - // specified, a “null†provider will be used which will result in all defaults - // being used. - Runtime hidden_envoy_deprecated_runtime = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Configuration for the runtime configuration provider. If not // specified, a “null†provider will be used which will result in all defaults // being used. @@ -179,7 +175,10 @@ message Bootstrap { bool use_tcp_for_dns_lookups = 20; // Configuration for an wasm service provider(s). - repeated envoy.extensions.wasm.v3.WasmService wasm_service = 21; + repeated wasm.v3.WasmService wasm_service = 21; + + Runtime hidden_envoy_deprecated_runtime = 11 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Administration interface :ref:`operations documentation @@ -216,6 +215,11 @@ message ClusterManager { // Specifies the path to the outlier event log. string event_log_path = 1; + + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. 
+ core.v3.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD new file mode 100644 index 000000000000..3234587e27e6 --- /dev/null +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD @@ -0,0 +1,21 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/bootstrap/v3:pkg", + "//envoy/config/cluster/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/config/overload/v3:pkg", + "//envoy/config/trace/v4alpha:pkg", + "//envoy/config/wasm/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto new file mode 100644 index 000000000000..149c3cf0ed2e --- /dev/null +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -0,0 +1,383 @@ +syntax = "proto3"; + +package envoy.config.bootstrap.v4alpha; + +import "envoy/config/cluster/v4alpha/cluster.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/config/core/v4alpha/socket_option.proto"; +import "envoy/config/listener/v3/listener.proto"; +import "envoy/config/metrics/v3/stats.proto"; +import "envoy/config/overload/v3/overload.proto"; +import "envoy/config/trace/v4alpha/trace.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import 
"envoy/extensions/transport_sockets/tls/v4alpha/cert.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.bootstrap.v4alpha"; +option java_outer_classname = "BootstrapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + +// Bootstrap :ref:`configuration overview `. +// [#next-free-field: 22] +message Bootstrap { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Bootstrap"; + + message StaticResources { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; + + // Static :ref:`Listeners `. These listeners are + // available regardless of LDS configuration. + repeated listener.v3.Listener listeners = 1; + + // If a network based configuration source is specified for :ref:`cds_config + // `, it's necessary + // to have some initial cluster definitions available to allow Envoy to know + // how to speak to the management server. These cluster definitions may not + // use :ref:`EDS ` (i.e. they should be static + // IP or DNS-based). 
+ repeated cluster.v4alpha.Cluster clusters = 2; + + // These static secrets can be used by :ref:`SdsSecretConfig + // ` + repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; + } + + message DynamicResources { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; + + reserved 4; + + // All :ref:`Listeners ` are provided by a single + // :ref:`LDS ` configuration source. + core.v4alpha.ConfigSource lds_config = 1; + + // All post-bootstrap :ref:`Cluster ` definitions are + // provided by a single :ref:`CDS ` + // configuration source. + core.v4alpha.ConfigSource cds_config = 2; + + // A single :ref:`ADS ` source may be optionally + // specified. This must have :ref:`api_type + // ` :ref:`GRPC + // `. Only + // :ref:`ConfigSources ` that have + // the :ref:`ads ` field set will be + // streamed on the ADS channel. + core.v4alpha.ApiConfigSource ads_config = 3; + } + + reserved 10, 11; + + reserved "runtime"; + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + core.v4alpha.Node node = 1; + + // Statically specified resources. + StaticResources static_resources = 2; + + // xDS configuration sources. + DynamicResources dynamic_resources = 3; + + // Configuration for the cluster manager which owns all upstream clusters + // within the server. + ClusterManager cluster_manager = 4; + + // Health discovery service config option. + // (:ref:`core.ApiConfigSource `) + core.v4alpha.ApiConfigSource hds_config = 14; + + // Optional file system path to search for startup flag files. + string flags_path = 5; + + // Optional set of stats sinks. + repeated metrics.v3.StatsSink stats_sinks = 6; + + // Configuration for internal processing of stats. + metrics.v3.StatsConfig stats_config = 13; + + // Optional duration between flushes to configured stats sinks. 
For + // performance reasons Envoy latches counters and only flushes counters and + // gauges at a periodic interval. If not specified the default is 5000ms (5 + // seconds). + // Duration must be at least 1ms and at most 5 min. + google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { + lt {seconds: 300} + gte {nanos: 1000000} + }]; + + // Optional watchdog configuration. + Watchdog watchdog = 8; + + // Configuration for an external tracing provider. If not specified, no + // tracing will be performed. + trace.v4alpha.Tracing tracing = 9; + + // Configuration for the runtime configuration provider. If not + // specified, a “null†provider will be used which will result in all defaults + // being used. + LayeredRuntime layered_runtime = 17; + + // Configuration for the local administration HTTP server. + Admin admin = 12; + + // Optional overload manager configuration. + overload.v3.OverloadManager overload_manager = 15; + + // Enable :ref:`stats for event dispatcher `, defaults to false. + // Note that this records a value for each iteration of the event loop on every thread. This + // should normally be minimal overhead, but when using + // :ref:`statsd `, it will send each observed value + // over the wire individually because the statsd protocol doesn't have any way to represent a + // histogram summary. Be aware that this can be a very large volume of data. + bool enable_dispatcher_stats = 16; + + // Optional string which will be used in lieu of x-envoy in prefixing headers. + // + // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be + // transformed into x-foo-retry-on etc. + // + // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the + // headers Envoy will trust for core code and core extensions only. Be VERY careful making + // changes to this string, especially in multi-layer Envoy deployments or deployments using + // extensions which are not upstream. 
+ string header_prefix = 18; + + // Optional proxy version which will be used to set the value of :ref:`server.version statistic + // ` if specified. Envoy will not process this value, it will be sent as is to + // :ref:`stats sinks `. + google.protobuf.UInt64Value stats_server_version_override = 19; + + // Always use TCP queries instead of UDP queries for DNS lookups. + // This may be overridden on a per-cluster basis in cds_config, + // when :ref:`dns_resolvers ` and + // :ref:`use_tcp_for_dns_lookups ` are + // specified. + bool use_tcp_for_dns_lookups = 20; + + // Configuration for an wasm service provider(s). + repeated wasm.v3.WasmService wasm_service = 21; +} + +// Administration interface :ref:`operations documentation +// `. +message Admin { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Admin"; + + // The path to write the access log for the administration server. If no + // access log is desired specify ‘/dev/null’. This is only required if + // :ref:`address ` is set. + string access_log_path = 1; + + // The cpu profiler output path for the administration server. If no profile + // path is specified, the default is ‘/var/log/envoy/envoy.prof’. + string profile_path = 2; + + // The TCP address that the administration server will listen on. + // If not specified, Envoy will not start an administration server. + core.v4alpha.Address address = 3; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.v4alpha.SocketOption socket_options = 4; +} + +// Cluster manager :ref:`architecture overview `. +message ClusterManager { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.ClusterManager"; + + message OutlierDetection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.ClusterManager.OutlierDetection"; + + // Specifies the path to the outlier event log. 
+ string event_log_path = 1; + + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + core.v4alpha.EventServiceConfig event_service = 2; + } + + // Name of the local cluster (i.e., the cluster that owns the Envoy running + // this configuration). In order to enable :ref:`zone aware routing + // ` this option must be set. + // If *local_cluster_name* is defined then :ref:`clusters + // ` must be defined in the :ref:`Bootstrap + // static cluster resources + // `. This is unrelated to + // the :option:`--service-cluster` option which does not `affect zone aware + // routing `_. + string local_cluster_name = 1; + + // Optional global configuration for outlier detection. + OutlierDetection outlier_detection = 2; + + // Optional configuration used to bind newly established upstream connections. + // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. + core.v4alpha.BindConfig upstream_bind_config = 3; + + // A management server endpoint to stream load stats to via + // *StreamLoadStats*. This must have :ref:`api_type + // ` :ref:`GRPC + // `. + core.v4alpha.ApiConfigSource load_stats_config = 4; +} + +// Envoy process watchdog configuration. When configured, this monitors for +// nonresponsive threads and kills the process after the configured thresholds. +// See the :ref:`watchdog documentation ` for more information. +message Watchdog { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; + + // The duration after which Envoy counts a nonresponsive thread in the + // *watchdog_miss* statistic. If not specified the default is 200ms. + google.protobuf.Duration miss_timeout = 1; + + // The duration after which Envoy counts a nonresponsive thread in the + // *watchdog_mega_miss* statistic. If not specified the default is + // 1000ms. 
+ google.protobuf.Duration megamiss_timeout = 2; + + // If a watched thread has been nonresponsive for this duration, assume a + // programming error and kill the entire Envoy process. Set to 0 to disable + // kill behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration kill_timeout = 3; + + // If at least two watched threads have been nonresponsive for at least this + // duration assume a true deadlock and kill the entire Envoy process. Set to 0 + // to disable this behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration multikill_timeout = 4; +} + +// Runtime :ref:`configuration overview ` (deprecated). +message Runtime { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Runtime"; + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. Envoy + // will watch the location for changes and reload the file system tree when + // they happen. If this parameter is not set, there will be no disk based + // runtime. + string symlink_root = 1; + + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 2; + + // Specifies an optional subdirectory to load within the root directory. If + // specified and the directory exists, configuration values within this + // directory will override those found in the primary subdirectory. This is + // useful when Envoy is deployed across many different types of servers. + // Sometimes it is useful to have a per service cluster directory for runtime + // configuration. See below for exactly how the override directory is used. + string override_subdirectory = 3; + + // Static base runtime. 
This will be :ref:`overridden + // ` by other runtime layers, e.g. + // disk or admin. This follows the :ref:`runtime protobuf JSON representation + // encoding `. + google.protobuf.Struct base = 4; +} + +// [#next-free-field: 6] +message RuntimeLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer"; + + // :ref:`Disk runtime ` layer. + message DiskLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer"; + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. + // Envoy will watch the location for changes and reload the file system tree + // when they happen. See documentation on runtime :ref:`atomicity + // ` for further details on how reloads are + // treated. + string symlink_root = 1; + + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 3; + + // :ref:`Append ` the + // service cluster to the path under symlink root. + bool append_service_cluster = 2; + } + + // :ref:`Admin console runtime ` layer. + message AdminLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer"; + } + + // :ref:`Runtime Discovery Service (RTDS) ` layer. + message RtdsLayer { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; + + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // RTDS configuration source. + core.v4alpha.ConfigSource rtds_config = 2; + } + + // Descriptive name for the runtime layer. 
This is only used for the runtime + // :http:get:`/runtime` output. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof layer_specifier { + option (validate.required) = true; + + // :ref:`Static runtime ` layer. + // This follows the :ref:`runtime protobuf JSON representation encoding + // `. Unlike static xDS resources, this static + // layer is overridable by later layers in the runtime virtual filesystem. + google.protobuf.Struct static_layer = 2; + + DiskLayer disk_layer = 3; + + AdminLayer admin_layer = 4; + + RtdsLayer rtds_layer = 5; + } +} + +// Runtime :ref:`configuration overview `. +message LayeredRuntime { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.LayeredRuntime"; + + // The :ref:`layers ` of the runtime. This is ordered + // such that later layers in the list overlay earlier entries. + repeated RuntimeLayer layers = 1; +} diff --git a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto index 8d70015100be..a0fdadd75724 100644 --- a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.cluster.aggregate.v2alpha; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v2alpha"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.aggregate.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Aggregate cluster configuration] diff --git a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto 
b/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto index 24252699d846..33f5ffe057e3 100644 --- a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto @@ -5,6 +5,7 @@ package envoy.config.cluster.dynamic_forward_proxy.v2alpha; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; @@ -12,6 +13,7 @@ option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.dynamic_forward_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy cluster configuration] diff --git a/generated_api_shadow/envoy/config/cluster/redis/BUILD b/generated_api_shadow/envoy/config/cluster/redis/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/cluster/redis/BUILD +++ b/generated_api_shadow/envoy/config/cluster/redis/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto index f36345c337e6..b1872501e8eb 100644 --- a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto @@ -5,11 +5,13 @@ package envoy.config.cluster.redis; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; 
+import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.redis"; option java_outer_classname = "RedisClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Redis Cluster Configuration] // This cluster adds support for `Redis Cluster `_, as part diff --git a/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto b/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto index 42de29b01e5b..96e69701cda2 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto @@ -7,13 +7,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Circuit breakers] diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index c6717cb0164b..e8e451de8e6b 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -19,14 +19,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "ClusterProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] @@ -85,17 +86,6 @@ message Cluster { // for an explanation. RANDOM = 3; - // Refer to the :ref:`original destination load balancing - // policy` - // for an explanation. - // - // .. attention:: - // - // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. - // - hidden_envoy_deprecated_ORIGINAL_DST_LB = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - // Refer to the :ref:`Maglev load balancing policy` // for an explanation. MAGLEV = 5; @@ -111,6 +101,9 @@ message Cluster { // and instead using the new load_balancing_policy field as the one and only mechanism for // configuring this.] LOAD_BALANCING_POLICY_CONFIG = 7; + + hidden_envoy_deprecated_ORIGINAL_DST_LB = 4 + [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; } // When V4_ONLY is selected, the DNS resolver will only perform a lookup for @@ -381,7 +374,7 @@ message Cluster { } // Common configuration for all load balancer implementations. - // [#next-free-field: 7] + // [#next-free-field: 8] message CommonLbConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.CommonLbConfig"; @@ -419,6 +412,16 @@ message Cluster { "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; } + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; + + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. 
+ bool use_hostname_for_hashing = 1; + } + // Configures the :ref:`healthy panic threshold `. // If not specified, the default is 50%. // To disable panic mode, set to 0%. @@ -427,11 +430,9 @@ message Cluster { // The specified percent will be truncated to the nearest 1%. type.v3.Percent healthy_panic_threshold = 1; - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; + google.protobuf.Duration update_merge_window = 4; - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } + bool ignore_new_hosts_until_first_hc = 5; // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when @@ -447,7 +448,7 @@ message Cluster { // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is // because merging those updates isn't currently safe. See // https://github.com/envoyproxy/envoy/pull/3941. - google.protobuf.Duration update_merge_window = 4; + bool close_connections_on_host_set_change = 6; // If set to true, Envoy will not consider new hosts when computing load balancing weights until // they have been health checked for the first time. This will have no effect unless @@ -468,11 +469,16 @@ message Cluster { // // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not // contribute to the calculation when deciding whether panic mode is enabled or not. - bool ignore_new_hosts_until_first_hc = 5; + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; + oneof locality_config_specifier { + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. 
+ ZoneAwareLbConfig zone_aware_lb_config = 2; + + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } } message RefreshRate { @@ -555,41 +561,26 @@ message Cluster { // `. string alt_stat_name = 28; - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + // The :ref:`service discovery type ` + // to use for resolving the cluster. + EdsClusterConfig eds_cluster_config = 3; - // The custom cluster type. - CustomClusterType cluster_type = 38; - } + // The custom cluster type. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; // The timeout for new network connections to hosts in the cluster. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + endpoint.v3.ClusterLoadAssignment load_assignment = 33; // The :ref:`load balancer type ` to use // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. 
- // - repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; + repeated core.v3.HealthCheck health_checks = 8; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -602,42 +593,33 @@ message Cluster { // Setting this allows non-EDS cluster types to contain embedded EDS equivalent // :ref:`endpoint assignments`. // - endpoint.v3.ClusterLoadAssignment load_assignment = 33; + google.protobuf.UInt32Value max_requests_per_connection = 9; // Optional :ref:`active health checking ` // configuration for the cluster. If no // configuration is specified no health checking will be done and all cluster // members will be considered healthy at all times. - repeated core.v3.HealthCheck health_checks = 8; + CircuitBreakers circuit_breakers = 10; // Optional maximum requests for a single upstream connection. This parameter // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. Setting this // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; + core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; // Optional :ref:`circuit breaking ` for the cluster. - CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. - // - // .. attention:: - // - // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - // set, `transport_socket` takes priority. - envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = - 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + core.v3.HttpProtocolOptions common_http_protocol_options = 29; // HTTP protocol options that are applied only to upstream HTTP connections. // These options apply to all HTTP versions. 
- core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; + core.v3.Http1ProtocolOptions http_protocol_options = 13; // Additional options when handling HTTP requests upstream. These options will be applicable to // both HTTP1 and HTTP2 requests. - core.v3.HttpProtocolOptions common_http_protocol_options = 29; + core.v3.Http2ProtocolOptions http2_protocol_options = 14; // Additional options when handling HTTP1 requests. - core.v3.Http1ProtocolOptions http_protocol_options = 13; + map typed_extension_protocol_options = 36; // Even if default HTTP2 protocol options are desired, this field must be // set so that Envoy will assume that the upstream supports HTTP/2 when @@ -645,20 +627,14 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v3.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map hidden_envoy_deprecated_extension_protocol_options = 35 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. 
- map typed_extension_protocol_options = 36; + RefreshRate dns_failure_refresh_rate = 44; // If the DNS refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -669,8 +645,7 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; + bool respect_dns_ttl = 39; // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -680,17 +655,17 @@ message Cluster { // other than :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS` this setting is // ignored. - RefreshRate dns_failure_refresh_rate = 44; + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS // resolution. - bool respect_dns_ttl = 39; + repeated core.v3.Address dns_resolvers = 18; // The DNS IP address resolution policy. If this setting is not specified, the // value defaults to // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + bool use_tcp_for_dns_lookups = 45; // If DNS resolvers are specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -702,16 +677,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - repeated core.v3.Address dns_resolvers = 18; + OutlierDetection outlier_detection = 19; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. - bool use_tcp_for_dns_lookups = 45; + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; // If specified, outlier detection will be enabled for this upstream cluster. 
// Each of the configuration values can be overridden via // :ref:`runtime values `. - OutlierDetection outlier_detection = 19; + core.v3.BindConfig upstream_bind_config = 21; // The interval for removing stale hosts from a cluster type // :ref:`ORIGINAL_DST`. @@ -726,56 +701,47 @@ message Cluster { // value defaults to 5000ms. For cluster types other than // :ref:`ORIGINAL_DST` // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + LbSubsetConfig lb_subset_config = 22; // Optional configuration used to bind newly established upstream connections. // This overrides any bind_config specified in the bootstrap proto. // If the address and port are empty, no bind will be performed. - core.v3.BindConfig upstream_bind_config = 21; + CommonLbConfig common_lb_config = 27; // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; + core.v3.TransportSocket transport_socket = 24; - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; + // Optional configuration for the Ring Hash load balancing policy. + core.v3.Metadata metadata = 25; - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; + // Optional configuration for the Original Destination load balancing policy. + ClusterProtocolSelection protocol_selection = 26; - // Optional configuration for the LeastRequest load balancing policy. 
- LeastRequestLbConfig least_request_lb_config = 37; - } + // Optional configuration for the LeastRequest load balancing policy. + UpstreamConnectionOptions upstream_connection_options = 30; // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; + bool close_connections_on_host_health_failure = 31; // Optional custom transport socket implementation to use for upstream connections. // To setup TLS, set a transport socket with name `tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. - core.v3.TransportSocket transport_socket = 24; + bool ignore_health_on_host_removal = 32; // The Metadata field can be used to provide additional information about the // cluster. It can be used for stats, logging, and varying filter behavior. // Fields should use reverse DNS notation to denote which entity within Envoy // will need the information. For instance, if the metadata is intended for // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.v3.Metadata metadata = 25; + repeated Filter filters = 40; // Determines how Envoy selects the protocol used to speak to upstream hosts. - ClusterProtocolSelection protocol_selection = 26; + LoadBalancingPolicy load_balancing_policy = 41; // Optional options for upstream connections. - UpstreamConnectionOptions upstream_connection_options = 30; + core.v3.ConfigSource lrs_server = 42; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. @@ -790,45 +756,64 @@ message Cluster { // the unhealthy status is detected. If there are a large number of connections open // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of // time exclusively closing these connections, and not processing any other traffic. 
- bool close_connections_on_host_health_failure = 31; + bool track_timeout_budgets = 47; // If set to true, Envoy will ignore the health value of a host when processing its removal // from service discovery. This means that if active health checking is used, Envoy will *not* // wait for the endpoint to go unhealthy before removing it. - bool ignore_health_on_host_removal = 32; + repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; // An (optional) network filter chain, listed in the order the filters should be applied. // The chain will be applied to all outgoing connections that Envoy makes to the upstream // servers of this cluster. - repeated Filter filters = 40; + envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = + 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; + map hidden_envoy_deprecated_extension_protocol_options = 35 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. 
- // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.v3.ConfigSource lrs_server = 42; + oneof cluster_discovery_type { + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. 
A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + CustomClusterType cluster_type = 38; + } + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + RingHashLbConfig ring_hash_lb_config = 23; + + OriginalDstLbConfig original_dst_lb_config = 34; + + LeastRequestLbConfig least_request_lb_config = 37; + } } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -860,11 +845,9 @@ message LoadBalancingPolicy { // Required. The name of the LB policy. string name = 1; - // Optional config for the LB policy. - // No more than one of these two fields may be populated. 
- google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } // Each client will iterate over the list in order and stop at the first policy that it diff --git a/generated_api_shadow/envoy/config/cluster/v3/filter.proto b/generated_api_shadow/envoy/config/cluster/v3/filter.proto index 9ded0fbbb12b..af3116ec26eb 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/filter.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/filter.proto @@ -4,13 +4,14 @@ package envoy.config.cluster.v3; import "google/protobuf/any.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "FilterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Upstream filters] // Upstream filters apply to the connections to the upstream cluster hosts. 
diff --git a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto index 1364b197f5cb..c0b4d5732db5 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto @@ -5,13 +5,14 @@ package envoy.config.cluster.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "OutlierDetectionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Outlier detection] diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD new file mode 100644 index 000000000000..3aff84b82faa --- /dev/null +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/cluster/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto new file mode 100644 index 000000000000..57a263a70d2e --- /dev/null +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "CircuitBreakerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Circuit breakers] + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. +message CircuitBreakers { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.CircuitBreakers"; + + // A Thresholds defines CircuitBreaker settings for a + // :ref:`RoutingPriority`. 
+ // [#next-free-field: 9] + message Thresholds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.CircuitBreakers.Thresholds"; + + message RetryBudget { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget"; + + // Specifies the limit on concurrent retries as a percentage of the sum of active requests and + // active pending requests. For example, if there are 100 active requests and the + // budget_percent is set to 25, there may be 25 active retries. + // + // This parameter is optional. Defaults to 20%. + type.v3.Percent budget_percent = 1; + + // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the + // number of active retries may never go below this number. + // + // This parameter is optional. Defaults to 3. + google.protobuf.UInt32Value min_retry_concurrency = 2; + } + + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; + + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_connections = 2; + + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 3; + + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_requests = 4; + + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. + google.protobuf.UInt32Value max_retries = 5; + + // Specifies a limit on concurrent retries in relation to the number of active requests. This + // parameter is optional. + // + // .. 
note:: + // + // If this field is set, the retry budget will override any configured retry circuit + // breaker. + RetryBudget retry_budget = 8; + + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + // + // .. note:: + // + // If a retry budget is used in lieu of the max_retries circuit breaker, + // the remaining retry resources remaining will not be tracked. + bool track_remaining = 6; + + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. + google.protobuf.UInt32Value max_connection_pools = 7; + } + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. 
+ repeated Thresholds thresholds = 1; +} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto new file mode 100644 index 000000000000..887ef9c3fe33 --- /dev/null +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -0,0 +1,873 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; +import "envoy/config/cluster/v4alpha/filter.proto"; +import "envoy/config/cluster/v4alpha/outlier_detection.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/endpoint/v3/endpoint.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "ClusterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Cluster configuration] + +// Configuration for a single upstream cluster. +// [#next-free-field: 48] +message Cluster { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; + + // Refer to :ref:`service discovery type ` + // for an explanation on each type. + enum DiscoveryType { + // Refer to the :ref:`static discovery type` + // for an explanation. 
+ STATIC = 0; + + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. + STRICT_DNS = 1; + + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + LOGICAL_DNS = 2; + + // Refer to the :ref:`service discovery type` + // for an explanation. + EDS = 3; + + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + ORIGINAL_DST = 4; + } + + // Refer to :ref:`load balancer type ` architecture + // overview section for information on each type. + enum LbPolicy { + reserved 4; + + reserved "ORIGINAL_DST_LB"; + + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + ROUND_ROBIN = 0; + + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + LEAST_REQUEST = 1; + + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + RING_HASH = 2; + + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + RANDOM = 3; + + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + MAGLEV = 5; + + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + CLUSTER_PROVIDED = 6; + + // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. + // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field + // and instead using the new load_balancing_policy field as the one and only mechanism for + // configuring this.] + LOAD_BALANCING_POLICY_CONFIG = 7; + } + + // When V4_ONLY is selected, the DNS resolver will only perform a lookup for + // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + // only perform a lookup for addresses in the IPv6 family. 
If AUTO is + // specified, the DNS resolver will first perform a lookup for addresses in + // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // For cluster types other than + // :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS`, + // this setting is + // ignored. + enum DnsLookupFamily { + AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; + } + + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; + + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } + + // TransportSocketMatch specifies what transport socket config will be used + // when the match conditions are satisfied. + message TransportSocketMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.TransportSocketMatch"; + + // The name of the match, used in stats generation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Optional endpoint metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match + // against the values specified in this field. + google.protobuf.Struct match = 2; + + // The configuration of the transport socket. + core.v4alpha.TransportSocket transport_socket = 3; + } + + // Extended cluster type. + message CustomClusterType { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CustomClusterType"; + + // The type of the cluster to instantiate. The name must match a supported cluster type. 
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.EdsClusterConfig"; + + // Configuration for the source of EDS updates for this Cluster. + core.v4alpha.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } + + // Optionally divide the endpoints in this cluster into subsets defined by + // endpoint metadata and selected by route and weighted cluster metadata. + // [#next-free-field: 8] + message LbSubsetConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.LbSubsetConfig"; + + // If NO_FALLBACK is selected, a result + // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + // any cluster endpoint may be returned (subject to policy, health checks, + // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + enum LbSubsetFallbackPolicy { + NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; + } + + // Specifications for subsets. + message LbSubsetSelector { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector"; + + // Allows to override top level fallback policy per selector. + enum LbSubsetSelectorFallbackPolicy { + // If NOT_DEFINED top level config fallback policy is used instead. + NOT_DEFINED = 0; + + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. 
+ NO_FALLBACK = 1; + + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + ANY_ENDPOINT = 2; + + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + DEFAULT_SUBSET = 3; + + // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + // keys reduced to + // :ref:`fallback_keys_subset`. + // It allows for a fallback to a different, less specific selector if some of the keys of + // the selector are considered optional. + KEYS_SUBSET = 4; + } + + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; + + // Subset of + // :ref:`keys` used by + // :ref:`KEYS_SUBSET` + // fallback policy. + // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + // For any other fallback policy the parameter is not used and should not be set. + // Only values also present in + // :ref:`keys` are allowed, but + // `fallback_keys_subset` cannot be equal to `keys`. + repeated string fallback_keys_subset = 3; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. 
+ google.protobuf.Struct default_subset = 2; + + // For each entry, LbEndpoint.Metadata's + // *envoy.lb* namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + repeated LbSubsetSelector subset_selectors = 3; + + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + bool locality_weight_aware = 4; + + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionately affected by the + // subset predicate. + bool scale_locality_weight = 5; + + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. 
+ // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. + bool panic_mode_any = 6; + + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + bool list_as_any = 7; + } + + // Specific configuration for the LeastRequest load balancing policy. + message LeastRequestLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.LeastRequestLbConfig"; + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + } + + // Specific configuration for the :ref:`RingHash` + // load balancing policy. + message RingHashLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.RingHashLbConfig"; + + // The hash function used to hash hosts onto the ketama ring. + enum HashFunction { + // Use `xxHash `_, this is the default hash function. + XX_HASH = 0; + + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + MURMUR_HASH_2 = 1; + } + + reserved 2; + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. 
+ google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; + + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; + } + + // Specific configuration for the + // :ref:`Original Destination ` + // load balancing policy. + message OriginalDstLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.OriginalDstLbConfig"; + + // When true, :ref:`x-envoy-original-dst-host + // ` can be used to override destination + // address. + // + // .. attention:: + // + // This header isn't sanitized by default, so enabling this feature allows HTTP clients to + // route traffic to arbitrary hosts and/or ports, which may have serious security + // consequences. + bool use_http_header = 1; + } + + // Common configuration for all load balancer implementations. + // [#next-free-field: 8] + message CommonLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig"; + + // Configuration for :ref:`zone aware routing + // `. + message ZoneAwareLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig"; + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. 
+ type.v3.Percent routing_enabled = 1; + + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + google.protobuf.UInt64Value min_cluster_size = 2; + + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + bool fail_traffic_on_panic = 3; + } + + // Configuration for :ref:`locality weighted load balancing + // ` + message LocalityWeightedLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; + } + + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; + + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + bool use_hostname_for_hashing = 1; + } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. 
+ type.v3.Percent healthy_panic_threshold = 1; + + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; + + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } + + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + google.protobuf.Duration update_merge_window = 4; + + // If set to true, Envoy will not consider new hosts when computing load balancing weights until + // they have been health checked for the first time. This will have no effect unless + // active health checking is also configured. + // + // Ignoring a host means that for any load balancing calculations that adjust weights based + // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and + // panic mode) Envoy will exclude these hosts in the denominator. + // + // For example, with hosts in two priorities P0 and P1, where P0 looks like + // {healthy, unhealthy (new), unhealthy (new)} + // and where P1 looks like + // {healthy, healthy} + // all traffic will still hit P0, as 1 / (3 - 2) = 1. 
+ // + // Enabling this will allow scaling up the number of hosts for a given cluster without entering + // panic mode or triggering priority spillover, assuming the hosts pass the first health check. + // + // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not + // contribute to the calculation when deciding whether panic mode is enabled or not. + bool ignore_new_hosts_until_first_hc = 5; + + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; + + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; + } + + message RefreshRate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.RefreshRate"; + + // Specifies the base interval between refreshes. This parameter is required and must be greater + // than zero and less than + // :ref:`max_interval `. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {nanos: 1000000} + }]; + + // Specifies the maximum interval between refreshes. This parameter is optional, but must be + // greater than or equal to the + // :ref:`base_interval ` if set. The default + // is 10 times the :ref:`base_interval `. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; + } + + reserved 12, 15, 7, 11, 35; + + reserved "hosts", "tls_context", "extension_protocol_options"; + + // Configuration to use different transport sockets for different endpoints. + // The entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata ` + // is used to match against the transport sockets as they appear in the list. The first + // :ref:`match ` is used. + // For example, with the following match + // + // .. 
code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: envoy.transport_sockets.raw_buffer + // + // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. + // + // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or + // *transport_socket* specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // *TransportSocketMatch* in this field. Other client Envoys receive CDS without + // *transport_socket_match* set, and still send plain text traffic to the same cluster. + // + // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + repeated TransportSocketMatch transport_socket_matches = 43; + + // Supplies the name of the cluster which must be unique across all clusters. 
+ // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes the *hosts* field in the v2 API. + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // + endpoint.v3.ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. 
If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.v4alpha.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + CircuitBreakers circuit_breakers = 10; + + // HTTP protocol options that are applied only to upstream HTTP connections. + // These options apply to all HTTP versions. + core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; + + // Additional options when handling HTTP requests upstream. These options will be applicable to + // both HTTP1 and HTTP2 requests. + core.v4alpha.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.v4alpha.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. 
+ map typed_extension_protocol_options = 36; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. The value configured must be at least 1ms. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; + + // If the DNS failure refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + // other than :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS` this setting is + // ignored. + RefreshRate dns_failure_refresh_rate = 44; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. 
+ repeated core.v4alpha.Address dns_resolvers = 18; + + // [#next-major-version: Reconcile DNS options in a single message.] + // Always use TCP queries instead of UDP queries for DNS lookups. + bool use_tcp_for_dns_lookups = 45; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.v4alpha.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. 
+ // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + + // Common configuration for all load balancer implementations. + CommonLbConfig common_lb_config = 27; + + // Optional custom transport socket implementation to use for upstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`UpstreamTlsContexts ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.v4alpha.TransportSocket transport_socket = 24; + + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as *envoy.filters.http.router*. + core.v4alpha.Metadata metadata = 25; + + // Determines how Envoy selects the protocol used to speak to upstream hosts. + ClusterProtocolSelection protocol_selection = 26; + + // Optional options for upstream connections. + UpstreamConnectionOptions upstream_connection_options = 30; + + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. 
+ // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. + bool close_connections_on_host_health_failure = 31; + + // If set to true, Envoy will ignore the health value of a host when processing its removal + // from service discovery. This means that if active health checking is used, Envoy will *not* + // wait for the endpoint to go unhealthy before removing it. + bool ignore_health_on_host_removal = 32; + + // An (optional) network filter chain, listed in the order the filters should be applied. + // The chain will be applied to all outgoing connections that Envoy makes to the upstream + // servers of this cluster. + repeated Filter filters = 40; + + // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the + // :ref:`lb_policy` field has the value + // :ref:`LOAD_BALANCING_POLICY_CONFIG`. + LoadBalancingPolicy load_balancing_policy = 41; + + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. 
+ // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.v4alpha.ConfigSource lrs_server = 42; + + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool track_timeout_budgets = 47; +} + +// [#not-implemented-hide:] Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. 
+// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +message LoadBalancingPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.LoadBalancingPolicy"; + + message Policy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.LoadBalancingPolicy.Policy"; + + reserved 2; + + reserved "config"; + + // Required. The name of the LB policy. + string name = 1; + + google.protobuf.Any typed_config = 3; + } + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. + repeated Policy policies = 1; +} + +// An extensible structure containing the address Envoy should bind to when +// establishing upstream connections. +message UpstreamBindConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.UpstreamBindConfig"; + + // The address Envoy should bind to when establishing upstream connections. + core.v4alpha.Address source_address = 1; +} + +message UpstreamConnectionOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.UpstreamConnectionOptions"; + + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. 
+ core.v4alpha.TcpKeepalive tcp_keepalive = 1; +} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto new file mode 100644 index 000000000000..eb825fdeb6d5 --- /dev/null +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "FilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Upstream filters] +// Upstream filters apply to the connections to the upstream cluster hosts. + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Filter"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ google.protobuf.Any typed_config = 2; +} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto new file mode 100644 index 000000000000..29a1e01270d9 --- /dev/null +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; + +package envoy.config.cluster.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; +option java_outer_classname = "OutlierDetectionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Outlier detection] + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +// [#next-free-field: 21] +message OutlierDetection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.OutlierDetection"; + + // The number of consecutive 5xx responses or local origin errors that are mapped + // to 5xx error codes before a consecutive 5xx ejection + // occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_5xx = 1; + + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. + google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; + + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected. + // Defaults to 30000ms or 30s. 
+ google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; + + // The maximum % of an upstream cluster that can be ejected due to outlier + // detection. Defaults to 10% but will eject at least one host regardless of the value. + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; + + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + google.protobuf.UInt32Value success_rate_minimum_hosts = 7; + + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. + google.protobuf.UInt32Value success_rate_request_volume = 8; + + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. 
The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + google.protobuf.UInt32Value success_rate_stdev_factor = 9; + + // The number of consecutive gateway failures (502, 503, 504 status codes) + // before a consecutive gateway failure ejection occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_gateway_failure = 10; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 + [(validate.rules).uint32 = {lte: 100}]; + + // Determines whether to distinguish local origin failures from external errors. If set to true + // the following configuration parameters are taken into account: + // :ref:`consecutive_local_origin_failure`, + // :ref:`enforcing_consecutive_local_origin_failure` + // and + // :ref:`enforcing_local_origin_success_rate`. + // Defaults to false. + bool split_external_local_origin_errors = 12; + + // The number of consecutive locally originated failures before ejection + // occurs. Defaults to 5. Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value consecutive_local_origin_failure = 13; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive locally originated failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 100. 
+ // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics for locally originated errors. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 + [(validate.rules).uint32 = {lte: 100}]; + + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given host is greater than or equal to this value, it will be + // ejected. Defaults to 85. + google.protobuf.UInt32Value failure_percentage_threshold = 16 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // failure percentage statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + // + // [#next-major-version: setting this without setting failure_percentage_threshold should be + // invalid in v4.] + google.protobuf.UInt32Value enforcing_failure_percentage = 17 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // local-origin failure percentage statistics. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 + [(validate.rules).uint32 = {lte: 100}]; + + // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
+ // If the total number of hosts in the cluster is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. + google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; + + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this host. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + google.protobuf.UInt32Value failure_percentage_request_volume = 20; +} diff --git a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto index d96dfc2c4c99..3941c20aeb80 100644 --- a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto +++ b/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto @@ -8,6 +8,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha"; @@ -15,6 +16,7 @@ option java_outer_classname = "DnsCacheProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.dynamic_forward_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy common configuration] @@ -46,7 +48,12 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; + // + // .. 
note: + // + // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. + google.protobuf.Duration dns_refresh_rate = 3 + [(validate.rules).duration = {gte {nanos: 1000000}}]; // The TTL for hosts that are unused. Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto b/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto index 5751b78cabbf..262557b35623 100644 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto +++ b/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/config_source.proto"; import "envoy/service/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.tap.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common tap extension configuration] diff --git a/generated_api_shadow/envoy/config/core/v3/address.proto b/generated_api_shadow/envoy/config/core/v3/address.proto index 92649ff6ae49..a9dc3c6e1e30 100644 --- a/generated_api_shadow/envoy/config/core/v3/address.proto +++ b/generated_api_shadow/envoy/config/core/v3/address.proto @@ -6,13 +6,14 @@ import "envoy/config/core/v3/socket_option.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "AddressProto"; option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Network addresses] @@ -53,29 +54,29 @@ message SocketAddress { // via :ref:`resolver_name `. string address = 2 [(validate.rules).string = {min_bytes: 1}]; + string resolver_name = 5; + + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + bool ipv4_compat = 6; + oneof port_specifier { option (validate.required) = true; + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. string named_port = 4; } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. 
- bool ipv4_compat = 6; } message TcpKeepalive { diff --git a/generated_api_shadow/envoy/config/core/v3/backoff.proto b/generated_api_shadow/envoy/config/core/v3/backoff.proto index 63fc868435ad..55b504e71657 100644 --- a/generated_api_shadow/envoy/config/core/v3/backoff.proto +++ b/generated_api_shadow/envoy/config/core/v3/backoff.proto @@ -4,13 +4,14 @@ package envoy.config.core.v3; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "BackoffProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Backoff Strategy] diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index 6316a393e47f..f9d7759cc7fa 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -13,13 +13,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "BaseProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common types] @@ -163,40 +164,36 @@ message Node { // Locality specifying where the Envoy instance is running. Locality locality = 4; - // This is motivated by informing a management server during canary which - // version of Envoy is being tested in a heterogeneous fleet. This will be set - // by Envoy in management server RPCs. - // This field is deprecated in favor of the user_agent_name and user_agent_version values. 
- string hidden_envoy_deprecated_build_version = 5 [deprecated = true]; - // Free-form string that identifies the entity requesting config. // E.g. "envoy" or "grpc" string user_agent_name = 6; - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + repeated Extension extensions = 9; - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } + // Structured version of the entity requesting config. + repeated string client_features = 10; // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; + repeated Address listening_addresses = 11; // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. Client features // use reverse DNS naming scheme, for example `com.acme.feature`. // See :ref:`the list of features ` that xDS client may // support. - repeated string client_features = 10; + string hidden_envoy_deprecated_build_version = 5 [deprecated = true]; - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - repeated Address listening_addresses = 11; + oneof user_agent_version_type { + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress `(0.0.0.0,80)`. 
The field is optional and just a hint. + string user_agent_version = 7; + + BuildVersion user_agent_build_version = 8; + } } // Metadata provides additional inputs to filters based on matched listeners, @@ -240,6 +237,17 @@ message RuntimeUInt32 { string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } +// Runtime derived double with a default when not specified. +message RuntimeDouble { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeDouble"; + + // Default value if runtime value is not available. + double default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + // Runtime derived bool with a default when not specified. message RuntimeFeatureFlag { option (udpa.annotations.versioning).previous_message_type = @@ -368,9 +376,9 @@ message TransportSocket { // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. 
oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index 26852a7f589c..159542a3e909 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -7,14 +7,15 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ConfigSourceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] @@ -139,28 +140,30 @@ message RateLimitSettings { message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; - oneof config_source_specifier { - option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. 
+ google.protobuf.Duration initial_fetch_timeout = 4; - // Path on the filesystem to source and watch for configuration updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; + // API configuration source. + ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; - // API configuration source. - ApiConfigSource api_config_source = 2; + oneof config_source_specifier { + option (validate.required) = true; // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; + string path = 1; // [#not-implemented-hide:] // When set, the client will access the resources from the same server it got the @@ -173,20 +176,20 @@ message ConfigSource { // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since // this field can implicitly mean to use the same stream in the case where the ConfigSource // is provided via ADS and the specified data can also be obtained via ADS.] - SelfConfigSource self = 5; - } + ApiConfigSource api_config_source = 2; - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. 
- google.protobuf.Duration initial_fetch_timeout = 4; + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + AggregatedConfigSource ads = 3; - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. 
+ SelfConfigSource self = 5; + } } diff --git a/generated_api_shadow/envoy/config/core/v3/event_service_config.proto b/generated_api_shadow/envoy/config/core/v3/event_service_config.proto new file mode 100644 index 000000000000..b3552e3975a3 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/event_service_config.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/grpc_service.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +message EventServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.EventServiceConfig"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. 
+ GrpcService grpc_service = 1; + } +} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto new file mode 100644 index 000000000000..800d7b5332a0 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. +message GrpcMethodList { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList"; + + message Service { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcMethodList.Service"; + + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The names of the gRPC methods in this service. 
+ repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 45fd33bebf7a..654d3ed81b56 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -10,13 +10,14 @@ import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "GrpcServiceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC services] @@ -113,9 +114,9 @@ message GrpcService { string name = 1; oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -233,26 +234,26 @@ message GrpcService { reserved 4; + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + google.protobuf.Duration timeout = 3; + + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + repeated HeaderValue initial_metadata = 5; + oneof target_specifier { option (validate.required) = true; - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. + // The timeout for the gRPC request. This is the timeout for a specific + // request. 
EnvoyGrpc envoy_grpc = 1; - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. + // Additional metadata to include in streams initiated to the GrpcService. + // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. GoogleGrpc google_grpc = 2; } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. - repeated HeaderValue initial_metadata = 5; } diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto index c534fa77a777..2ed3b69eaa4f 100644 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v3/health_check.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/event_service_config.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -12,14 +13,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health 
check] // * Health checking :ref:`architecture overview `. @@ -52,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 22] +// [#next-free-field: 23] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -79,7 +81,8 @@ message HealthCheck { // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. string host = 1; // Specifies the HTTP path that will be requested during health checking. For example @@ -92,16 +95,6 @@ message HealthCheck { // [#not-implemented-hide:] HTTP specific response. Payload receive = 4; - // An optional service name parameter which is used to validate the identity of - // the health checked cluster. See the :ref:`architecture overview - // ` for more information. - // - // .. attention:: - // - // This field has been deprecated in favor of `service_name_matcher` for better flexibility - // over matching with service-cluster name. - string hidden_envoy_deprecated_service_name = 5 [deprecated = true]; - // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers @@ -113,12 +106,6 @@ message HealthCheck { // health checked cluster. repeated string request_headers_to_remove = 8; - // If set, health checks will be made using http/2. - // Deprecated, use :ref:`codec_client_type - // ` instead. - bool hidden_envoy_deprecated_use_http2 = 7 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Specifies a list of HTTP response statuses considered healthy. 
If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. The start and end of each @@ -133,6 +120,11 @@ message HealthCheck { // `. See the :ref:`architecture overview // ` for more information. type.matcher.v3.StringMatcher service_name_matcher = 11; + + string hidden_envoy_deprecated_service_name = 5 [deprecated = true]; + + bool hidden_envoy_deprecated_use_http2 = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } message TcpHealthCheck { @@ -176,7 +168,8 @@ message HealthCheck { // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated - // with will be used. + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. string authority = 2; } @@ -191,9 +184,9 @@ message HealthCheck { // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -260,21 +253,17 @@ message HealthCheck { // Reuse health check connection between health checks. Default is true. google.protobuf.BoolValue reuse_connection = 7; - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; + // HTTP health check. + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - // TCP health check. - TcpHealthCheck tcp_health_check = 9; + // TCP health check. + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - // gRPC health check. 
- GrpcHealthCheck grpc_health_check = 11; + // gRPC health check. + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } + // Custom health check. + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to @@ -284,14 +273,14 @@ message HealthCheck { // any other. // // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + string event_log_path = 17; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; + EventServiceConfig event_service = 22; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks @@ -299,24 +288,33 @@ message HealthCheck { // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; + bool always_log_health_check_failures = 19; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. 
// // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; + TlsOptions tls_options = 21; - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; + oneof health_checker { + option (validate.required) = true; - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; + // Specifies the path to the :ref:`health check event log `. + // If empty, no event log will be written. + HttpHealthCheck http_health_check = 8; - // This allows overriding the cluster TLS settings, just for health check connections. - TlsOptions tls_options = 21; + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + TcpHealthCheck tcp_health_check = 9; + + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + GrpcHealthCheck grpc_health_check = 11; + + // This allows overriding the cluster TLS settings, just for health check connections. 
+ CustomHealthCheck custom_health_check = 13; + } } diff --git a/generated_api_shadow/envoy/config/core/v3/http_uri.proto b/generated_api_shadow/envoy/config/core/v3/http_uri.proto index 481ba9378570..6cc4d36d3944 100644 --- a/generated_api_shadow/envoy/config/core/v3/http_uri.proto +++ b/generated_api_shadow/envoy/config/core/v3/http_uri.proto @@ -4,13 +4,14 @@ package envoy.config.core.v3; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Service URI ] @@ -28,6 +29,20 @@ message HttpUri { // string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // + google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; + // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue @@ -35,21 +50,7 @@ message HttpUri { oneof http_upstream_type { option (validate.required) = true; - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
- google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; } diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 8d23ba229e45..400b0dd95a94 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -5,13 +5,14 @@ package envoy.config.core.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Protocol options] @@ -37,12 +38,33 @@ message UpstreamHttpProtocolOptions { bool auto_san_validation = 2; } +// [#next-free-field: 6] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpProtocolOptions"; + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. 
The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. If not set, there is no idle timeout. When the + // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout @@ -73,6 +95,11 @@ message HttpProtocolOptions { // The current implementation implements this timeout on downstream connections only. // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. 
+ HeadersWithUnderscoresAction headers_with_underscores_action = 5; } // [#next-free-field: 6] diff --git a/generated_api_shadow/envoy/config/core/v3/socket_option.proto b/generated_api_shadow/envoy/config/core/v3/socket_option.proto index 0de7848aea0d..836b8f553813 100644 --- a/generated_api_shadow/envoy/config/core/v3/socket_option.proto +++ b/generated_api_shadow/envoy/config/core/v3/socket_option.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.config.core.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "SocketOptionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Socket Option ] @@ -39,17 +40,17 @@ message SocketOption { // The numeric name as passed to setsockopt int64 name = 3; + // Because many sockopts take an int value. + SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; + oneof value { option (validate.required) = true; - // Because many sockopts take an int value. + // Otherwise it's a byte buffer. int64 int_value = 4; - // Otherwise it's a byte buffer. + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. bytes buf_value = 5; } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. - SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/BUILD b/generated_api_shadow/envoy/config/core/v4alpha/BUILD new file mode 100644 index 000000000000..aeac38ac2833 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/core/v4alpha/address.proto b/generated_api_shadow/envoy/config/core/v4alpha/address.proto new file mode 100644 index 000000000000..a2e6070103ae --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/address.proto @@ -0,0 +1,145 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/socket_option.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "AddressProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Network addresses] + +message Pipe { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Pipe"; + + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The mode for the Pipe. Not applicable for abstract sockets. 
+ uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; +} + +// [#next-free-field: 7] +message SocketAddress { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress"; + + enum Protocol { + TCP = 0; + UDP = 1; + } + + Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; + + // The address for this socket. :ref:`Listeners ` will bind + // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` + // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: + // It is possible to distinguish a Listener address via the prefix/suffix matching + // in :ref:`FilterChainMatch `.] When used + // within an upstream :ref:`BindConfig `, the address + // controls the source address of outbound connections. For :ref:`clusters + // `, the cluster type determines whether the + // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS + // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized + // via :ref:`resolver_name `. + string address = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof port_specifier { + option (validate.required) = true; + + uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; + + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + string named_port = 4; + } + + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. 
Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; +} + +message TcpKeepalive { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.TcpKeepalive"; + + // Maximum number of keepalive probes to send without response before deciding + // the connection is dead. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 9.) + google.protobuf.UInt32Value keepalive_probes = 1; + + // The number of seconds a connection needs to be idle before keep-alive probes + // start being sent. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 7200s (i.e., 2 hours.) + google.protobuf.UInt32Value keepalive_time = 2; + + // The number of seconds between keep-alive probes. Default is to use the OS + // level configuration (unless overridden, Linux defaults to 75s.) + google.protobuf.UInt32Value keepalive_interval = 3; +} + +message BindConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BindConfig"; + + // The address to bind to when creating a socket. + SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; + + // Whether to set the *IP_FREEBIND* option when creating the socket. When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option *IP_FREEBIND* is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + google.protobuf.BoolValue freebind = 2; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. 
+ repeated SocketOption socket_options = 3; +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. +message Address { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Address"; + + oneof address { + option (validate.required) = true; + + SocketAddress socket_address = 1; + + Pipe pipe = 2; + } +} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. +message CidrRange { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange"; + + // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. + string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Length of prefix, e.g. 0, 32. + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/backoff.proto b/generated_api_shadow/envoy/config/core/v4alpha/backoff.proto new file mode 100644 index 000000000000..07a2bdff175e --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/backoff.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "BackoffProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Backoff Strategy] + +// Configuration defining a jittered exponential back off strategy. 
+message BackoffStrategy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.BackoffStrategy"; + + // The base interval to be used for the next back off computation. It should + // be greater than zero and less than or equal to :ref:`max_interval + // `. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, + // but must be greater than or equal to the :ref:`base_interval + // ` if set. The default + // is 10 times the :ref:`base_interval + // `. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto new file mode 100644 index 000000000000..dbc3c31e40e4 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/base.proto @@ -0,0 +1,421 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/backoff.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/v3/percent.proto"; +import "envoy/type/v3/semantic_version.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "BaseProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common types] + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. 
The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; + PATCH = 9; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. + INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} + +// Identifies location of where either Envoy runs or where upstream hosts run. +message Locality { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Locality"; + + // Region this :ref:`zone ` belongs to. + string region = 1; + + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + string zone = 2; + + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + string sub_zone = 3; +} + +// BuildVersion combines SemVer version of extension with free-form build information +// (i.e. 'alpha', 'private-build') as a set of strings. 
+message BuildVersion { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BuildVersion"; + + // SemVer version of extension. + type.v3.SemanticVersion version = 1; + + // Free-form build information. + // Envoy defines several well known keys in the source/common/common/version.h file + google.protobuf.Struct metadata = 2; +} + +// Version and identification for an Envoy extension. +// [#next-free-field: 6] +message Extension { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Extension"; + + // This is the name of the Envoy filter as specified in the Envoy + // configuration, e.g. envoy.filters.http.router, com.acme.widget. + string name = 1; + + // Category of the extension. + // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" + // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from + // acme.com vendor. + // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] + string category = 2; + + // [#not-implemented-hide:] Type descriptor of extension configuration proto. + // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] + // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + string type_descriptor = 3; + + // The version is a property of the extension and maintained independently + // of other extensions and the Envoy API. + // This field is not set when extension did not provide version information. + BuildVersion version = 4; + + // Indicates that the extension is present but was disabled via dynamic configuration. + bool disabled = 5; +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. 
+// [#next-free-field: 12] +message Node { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Node"; + + reserved 5; + + reserved "build_version"; + + // An opaque node identifier for the Envoy node. This also provides the local + // service node name. It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. + string id = 1; + + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification + // `, + // :ref:`runtime override directory `, + // :ref:`user agent addition + // `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. + string cluster = 2; + + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + google.protobuf.Struct metadata = 3; + + // Locality specifying where the Envoy instance is running. + Locality locality = 4; + + // Free-form string that identifies the entity requesting config. + // E.g. "envoy" or "grpc" + string user_agent_name = 6; + + oneof user_agent_version_type { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + string user_agent_version = 7; + + // Structured version of the entity requesting config. + BuildVersion user_agent_build_version = 8; + } + + // List of extensions and their versions supported by the node. + repeated Extension extensions = 9; + + // Client feature support list. These are well known features described + // in the Envoy API repository for a given major version of an API. Client features + // use reverse DNS naming scheme, for example `com.acme.feature`. 
+ // See :ref:`the list of features ` that xDS client may + // support. + repeated string client_features = 10; + + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. + repeated Address listening_addresses = 11; +} + +// Metadata provides additional inputs to filters based on matched listeners, +// filter chains, routes and endpoints. It is structured as a map, usually from +// filter name (in reverse DNS format) to metadata specific to the filter. Metadata +// key-values for a filter are merged as connection and request handling occurs, +// with later values for the same key overriding earlier values. +// +// An example use of metadata is providing additional values to +// http_connection_manager in the envoy.http_connection_manager.access_log +// namespace. +// +// Another example use of metadata is to per service config info in cluster metadata, which may get +// consumed by multiple filters. +// +// For load balancing, Metadata provides a means to subset cluster endpoints. +// Endpoints have a Metadata object associated and routes contain a Metadata +// object to match against. There are some well defined metadata used today for +// this purpose: +// +// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an +// endpoint and is also used during header processing +// (x-envoy-upstream-canary) and for stats purposes. +// [#next-major-version: move to type/metadata/v2] +message Metadata { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Metadata"; + + // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + // namespace is reserved for Envoy's built-in filters. 
+ map filter_metadata = 1; +} + +// Runtime derived uint32 with a default when not specified. +message RuntimeUInt32 { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeUInt32"; + + // Default value if runtime value is not available. + uint32 default_value = 2; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; +} + +// Runtime derived double with a default when not specified. +message RuntimeDouble { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeDouble"; + + // Default value if runtime value is not available. + double default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Runtime derived bool with a default when not specified. +message RuntimeFeatureFlag { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RuntimeFeatureFlag"; + + // Default value if runtime value is not available. + google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; + + // Runtime key to get value for comparison. This value is used if defined. The boolean value must + // be represented via its + // `canonical JSON encoding `_. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Header name/value pair. +message HeaderValue { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderValue"; + + // Header name. + string key = 1 + [(validate.rules).string = + {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of `-`. 
+ string value = 2 [ + (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; +} + +// Header name/value pair plus option to control append behavior. +message HeaderValueOption { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HeaderValueOption"; + + // Header name/value pair that this option applies to. + HeaderValue header = 1 [(validate.rules).message = {required: true}]; + + // Should the value be appended? If true (default), the value is appended to + // existing values. + google.protobuf.BoolValue append = 2; +} + +// Wrapper for a set of headers. +message HeaderMap { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderMap"; + + repeated HeaderValue headers = 1; +} + +// Data source consisting of either a file or an inline value. +message DataSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.DataSource"; + + oneof specifier { + option (validate.required) = true; + + // Local filesystem data source. + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Bytes inlined in the configuration. + bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; + + // String inlined in the configuration. + string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; + } +} + +// The message specifies the retry policy of remote data source when fetching fails. +message RetryPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RetryPolicy"; + + // Specifies parameters that control :ref:`retry backoff strategy `. + // This parameter is optional, in which case the default base interval is 1000 milliseconds. The + // default maximum interval is 10 times the base interval. + BackoffStrategy retry_back_off = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. 
+ google.protobuf.UInt32Value num_retries = 2; +} + +// The message specifies how to fetch data from remote and how to verify it. +message RemoteDataSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RemoteDataSource"; + + // The HTTP URI to fetch the remote data. + HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; + + // SHA256 string for verifying data. + string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Retry policy for fetching remote data. + RetryPolicy retry_policy = 3; +} + +// Async data source which support async data fetch. +message AsyncDataSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.AsyncDataSource"; + + oneof specifier { + option (validate.required) = true; + + // Local async data source. + DataSource local = 1; + + // Remote async data source. + RemoteDataSource remote = 2; + } +} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +message TransportSocket { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TransportSocket"; + + reserved 2; + + reserved "config"; + + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +// specified via a runtime key. +// +// .. 
note:: +// +// Parsing of the runtime key's data is implemented such that it may be represented as a +// :ref:`FractionalPercent ` proto represented as JSON/YAML +// and may also be represented as an integer with the assumption that the value is an integral +// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse +// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. +message RuntimeFractionalPercent { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RuntimeFractionalPercent"; + + // Default value if the runtime value's for the numerator/denominator keys are not available. + type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; + + // Runtime key for a YAML representation of a FractionalPercent. + string runtime_key = 2; +} + +// Identifies a specific ControlPlane instance that Envoy is connected to. +message ControlPlane { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ControlPlane"; + + // An opaque control plane identifier that uniquely identifies an instance + // of control plane. This can be used to identify which control plane instance, + // the Envoy is connected to. 
+ string identifier = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto new file mode 100644 index 000000000000..0cfc7fc59b94 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto @@ -0,0 +1,197 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ConfigSourceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Configuration sources] + +// xDS API version. This is used to describe both resource and transport +// protocol versions (in distinct configuration fields). +enum ApiVersion { + // When not specified, we assume v2, to ease migration to Envoy's stable API + // versioning. If a client does not support v2 (e.g. due to deprecation), this + // is an invalid value. + AUTO = 0; + + // Use xDS v2 API. + V2 = 1; + + // Use xDS v3 API. + V3 = 2; +} + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. +// [#next-free-field: 9] +message ApiConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ApiConfigSource"; + + // APIs may be fetched via either REST or gRPC. + enum ApiType { + // Ideally this would be 'reserved 0' but one can't reserve the default + // value. Instead we throw an exception if this is ever used. 
+ hidden_envoy_deprecated_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 + [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; + + // REST-JSON v2 API. The `canonical JSON encoding + // `_ for + // the v2 protos is used. + REST = 1; + + // gRPC v2 API. + GRPC = 2; + + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + // + // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. + // Do not use for other xDSes. + // [#comment:TODO(fredlas) update/remove this warning when appropriate.] + DELTA_GRPC = 3; + } + + // API type (gRPC, REST, delta gRPC) + ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; + + // Cluster names should be used only with REST. If > 1 + // cluster is defined, clusters will be cycled through if any kind of failure + // occurs. + // + // .. note:: + // + // The cluster with name ``cluster_name`` must be statically defined and its + // type must not be ``EDS``. + repeated string cluster_names = 2; + + // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + // services will be cycled through if any kind of failure occurs. + repeated GrpcService grpc_services = 4; + + // For REST APIs, the delay between successive polls. + google.protobuf.Duration refresh_delay = 3; + + // For REST APIs, the request timeout. If not set, a default value of 1s will be used. + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; + + // For GRPC APIs, the rate limit settings. 
If present, discovery requests made by Envoy will be + // rate limited. + RateLimitSettings rate_limit_settings = 6; + + // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + bool set_node_on_first_message_only = 7; +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. +message AggregatedConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.AggregatedConfigSource"; +} + +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +message SelfConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.SelfConfigSource"; +} + +// Rate Limit settings to be applied for discovery requests made by Envoy. +message RateLimitSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.RateLimitSettings"; + + // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a + // default value of 100 will be used. + google.protobuf.UInt32Value max_tokens = 1; + + // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens + // per second will be used. + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. 
+// [#next-free-field: 7] +message ConfigSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. + string path = 1; + + // API configuration source. + ApiConfigSource api_config_source = 2; + + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. + AggregatedConfigSource ads = 3; + + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] 
+ SelfConfigSource self = 5; + } + + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + google.protobuf.Duration initial_fetch_timeout = 4; + + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. + ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto b/generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto new file mode 100644 index 000000000000..a0b4e5590d1d --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. 
+message EventServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.EventServiceConfig"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. + GrpcService grpc_service = 1; + } +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto new file mode 100644 index 000000000000..a4a7be077b27 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. +message GrpcMethodList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcMethodList"; + + message Service { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcMethodList.Service"; + + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The names of the gRPC methods in this service. 
+ repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto new file mode 100644 index 000000000000..64bbc6b5f077 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -0,0 +1,261 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "GrpcServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: gRPC services] + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +// [#next-free-field: 6] +message GrpcService { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService"; + + message EnvoyGrpc { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.EnvoyGrpc"; + + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`transport_socket + // `. + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // [#next-free-field: 7] + message GoogleGrpc { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc"; + + // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. 
+ message SslCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials"; + + // PEM encoded server root certificates. + DataSource root_certs = 1; + + // PEM encoded client private key. + DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // PEM encoded client certificate chain. + DataSource cert_chain = 3; + } + + // Local channel credentials. Only UDS is supported for now. + // See https://github.com/grpc/grpc/pull/15909. + message GoogleLocalCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials"; + } + + // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call + // credential types. + message ChannelCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials"; + + oneof credential_specifier { + option (validate.required) = true; + + SslCredentials ssl_credentials = 1; + + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_default = 2; + + GoogleLocalCredentials local_credentials = 3; + } + } + + // [#next-free-field: 8] + message CallCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials"; + + message ServiceAccountJWTAccessCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." 
+ "ServiceAccountJWTAccessCredentials"; + + string json_key = 1; + + uint64 token_lifetime_seconds = 2; + } + + message GoogleIAMCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; + + string authorization_token = 1; + + string authority_selector = 2; + } + + message MetadataCredentialsFromPlugin { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." + "MetadataCredentialsFromPlugin"; + + reserved 2; + + reserved "config"; + + string name = 1; + + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Security token service configuration that allows Google gRPC to + // fetch security token from an OAuth 2.0 authorization server. + // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and + // https://github.com/grpc/grpc/pull/19587. + // [#next-free-field: 10] + message StsService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService"; + + // URI of the token exchange service that handles token exchange requests. + // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by + // https://github.com/envoyproxy/protoc-gen-validate/issues/303] + string token_exchange_service_uri = 1; + + // Location of the target service or resource where the client + // intends to use the requested security token. + string resource = 2; + + // Logical name of the target service where the client intends to + // use the requested security token. + string audience = 3; + + // The desired scope of the requested security token in the + // context of the service or resource where the token will be used. + string scope = 4; + + // Type of the requested security token. 
+ string requested_token_type = 5; + + // The path of subject token, a security token that represents the + // identity of the party on behalf of whom the request is being made. + string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + + // Type of the subject token. + string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + + // The path of actor token, a security token that represents the identity + // of the acting party. The acting party is authorized to use the + // requested security token and act on behalf of the subject. + string actor_token_path = 8; + + // Type of the actor token. + string actor_token_type = 9; + } + + oneof credential_specifier { + option (validate.required) = true; + + // Access token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + string access_token = 1; + + // Google Compute Engine credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_compute_engine = 2; + + // Google refresh token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. + string google_refresh_token = 3; + + // Service Account JWT Access credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. + ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; + + // Google IAM credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. + GoogleIAMCredentials google_iam = 5; + + // Custom authenticator credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. + // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. + MetadataCredentialsFromPlugin from_plugin = 6; + + // Custom security token service which implements OAuth 2.0 token exchange. 
+ // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 + // See https://github.com/grpc/grpc/pull/19587. + StsService sts_service = 7; + } + } + + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + ChannelCredentials channel_credentials = 2; + + // A set of call credentials that can be composed with `channel credentials + // `_. + repeated CallCredentials call_credentials = 3; + + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; + + // The name of the Google gRPC credentials factory to use. This must have been registered with + // Envoy. If this is empty, a default credentials factory will be used that sets up channel + // credentials based on other configuration parameters. + string credentials_factory_name = 5; + + // Additional configuration for site-specific customizations of the Google + // gRPC library. + google.protobuf.Struct config = 6; + } + + reserved 4; + + oneof target_specifier { + option (validate.required) = true; + + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc envoy_grpc = 1; + + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc google_grpc = 2; + } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // Additional metadata to include in streams initiated to the GrpcService. 
+ // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + repeated HeaderValue initial_metadata = 5; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto new file mode 100644 index 000000000000..0e6c4e73c2a2 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto @@ -0,0 +1,321 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/v3/http.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health check] +// * Health checking :ref:`architecture overview `. +// * If health checking is configured for a cluster, additional statistics are emitted. They are +// documented :ref:`here `. + +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + UNKNOWN = 0; + + // Healthy. + HEALTHY = 1; + + // Unhealthy. + UNHEALTHY = 2; + + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; + + // Health check timed out. 
This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. + TIMEOUT = 4; + + // Degraded. + DEGRADED = 5; +} + +// [#next-free-field: 23] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; + + // Describes the encoding of the payload bytes in the payload. + message Payload { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.Payload"; + + oneof payload { + option (validate.required) = true; + + // Hex encoded payload. E.g., "000000FF". + string text = 1 [(validate.rules).string = {min_bytes: 1}]; + + // [#not-implemented-hide:] Binary payload. + bytes binary = 2; + } + } + + // [#next-free-field: 12] + message HttpHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.HttpHealthCheck"; + + reserved 5, 7; + + reserved "service_name", "use_http2"; + + // The value of the host header in the HTTP health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. + string host = 1; + + // Specifies the HTTP path that will be requested during health checking. For example + // */healthcheck*. + string path = 2 [(validate.rules).string = {min_bytes: 1}]; + + // [#not-implemented-hide:] HTTP specific payload. + Payload send = 3; + + // [#not-implemented-hide:] HTTP specific response. + Payload receive = 4; + + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. For more information, including details on header value syntax, see + // the documentation on :ref:`custom request headers + // `. 
+ repeated HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request that is sent to the + // health checked cluster. + repeated string request_headers_to_remove = 8; + + // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. The start and end of each + // range are required. Only statuses in the range [100, 600) are allowed. + repeated type.v3.Int64Range expected_statuses = 9; + + // Use specified application protocol for health checks. + type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; + + // An optional service name parameter which is used to validate the identity of + // the health checked cluster using a :ref:`StringMatcher + // `. See the :ref:`architecture overview + // ` for more information. + type.matcher.v3.StringMatcher service_name_matcher = 11; + } + + message TcpHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.TcpHealthCheck"; + + // Empty payloads imply a connect-only health check. + Payload send = 1; + + // When checking the response, “fuzzy†matching is performed such that each + // binary block must be found, and in the order specified, but not + // necessarily contiguous. + repeated Payload receive = 2; + } + + message RedisHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.RedisHealthCheck"; + + // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + string key = 1; + } + + // `grpc.health.v1.Health + // `_-based + // healthcheck. See `gRPC doc `_ + // for details. + message GrpcHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.GrpcHealthCheck"; + + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + string service_name = 1; + + // The value of the :authority header in the gRPC health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. + string authority = 2; + } + + // Custom health check. + message CustomHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.CustomHealthCheck"; + + reserved 2; + + reserved "config"; + + // The registered name of the custom health checker. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Health checks occur over the transport socket specified for the cluster. This implies that if a + // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. + // + // This allows overriding the cluster TLS settings, just for health check connections. 
+ message TlsOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HealthCheck.TlsOptions"; + + // Specifies the ALPN protocols for health check connections. This is useful if the + // corresponding upstream is using ALPN-based :ref:`FilterChainMatch + // ` along with different protocols for health checks + // versus data connections. If empty, no ALPN protocols will be set on health check connections. + repeated string alpn_protocols = 1; + } + + reserved 10; + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add interval_ms * + // interval_jitter_percent / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. 
+ google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; + + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + + oneof health_checker { + option (validate.required) = true; + + // HTTP health check. + HttpHealthCheck http_health_check = 8; + + // TCP health check. + TcpHealthCheck tcp_health_check = 9; + + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // Custom health check. + CustomHealthCheck custom_health_check = 13; + } + + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". 
+ google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; + + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; + + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; + + // Specifies the path to the :ref:`health check event log `. + // If empty, no event log will be written. + string event_log_path = 17; + + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; + + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + bool always_log_health_check_failures = 19; + + // This allows overriding the cluster TLS settings, just for health check connections. 
+ TlsOptions tls_options = 21; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto b/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto new file mode 100644 index 000000000000..e88a9aa7d7df --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "HttpUriProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP Service URI ] + +// Envoy external URI descriptor +message HttpUri { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpUri"; + + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + // + string uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify how `uri` is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. + oneof http_upstream_type { + option (validate.required) = true; + + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
+ google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto new file mode 100644 index 000000000000..dcb205444524 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -0,0 +1,323 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Protocol options] + +// [#not-implemented-hide:] +message TcpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TcpProtocolOptions"; +} + +message UpstreamHttpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.UpstreamHttpProtocolOptions"; + + // Set transport socket `SNI `_ for new + // upstream connections based on the downstream HTTP host/authority header, as seen by the + // :ref:`router filter `. + bool auto_sni = 1; + + // Automatic validate upstream presented certificate for new upstream connections based on the + // downstream HTTP host/authority header, as seen by the + // :ref:`router filter `. + // This field is intended to set with `auto_sni` field. 
+ bool auto_san_validation = 2; +} + +// [#next-free-field: 6] +message HttpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.HttpProtocolOptions"; + + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + DROP_HEADER = 2; + } + + // The idle timeout for connections. The idle timeout is defined as the + // period in which there are no active requests. When the + // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 + // downstream connection a drain sequence will occur prior to closing the connection, see + // :ref:`drain_timeout + // `. + // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + // + // .. warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. 
+ google.protobuf.Duration idle_timeout = 1; + + // The maximum duration of a connection. The duration is defined as a period since a connection + // was established. If not set, there is no max duration. When max_connection_duration is reached + // the connection will be closed. Drain sequence will occur prior to closing the connection if + // if's applicable. See :ref:`drain_timeout + // `. + // Note: not implemented for upstream connections. + google.protobuf.Duration max_connection_duration = 3; + + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + // reset independent of any other timeouts. If not specified, this value is not set. + // The current implementation implements this timeout on downstream connections only. + // [#comment:TODO(shikugawa): add this functionality to upstream.] + google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. 
+ HeadersWithUnderscoresAction headers_with_underscores_action = 5; +} + +// [#next-free-field: 6] +message Http1ProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http1ProtocolOptions"; + + message HeaderKeyFormat { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat"; + + message ProperCaseWords { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; + } + + oneof header_format { + option (validate.required) = true; + + // Formats the header by proper casing words: the first character and any character following + // a special character will be capitalized if it's an alpha character. For example, + // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + // Note that while this results in most headers following conventional casing, certain headers + // are not covered. For example, the "TE" header will be formatted as "Te". + ProperCaseWords proper_case_words = 1; + } + } + + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + // *http_proxy* environment variable. + google.protobuf.BoolValue allow_absolute_url = 1; + + // Handle incoming HTTP/1.0 and HTTP 0.9 requests. + // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // *default_host_for_http_10* is configured. + bool accept_http_10 = 2; + + // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. 
+ // This is a no-op if *accept_http_10* is not true. + string default_host_for_http_10 = 3; + + // Describes how the keys for response headers should be formatted. By default, all header keys + // are lower cased. + HeaderKeyFormat header_key_format = 4; + + // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + // + // .. attention:: + // + // Note that this only happens when Envoy is chunk encoding which occurs when: + // - The request is HTTP/1.1. + // - Is neither a HEAD only request nor a HTTP Upgrade. + // - Not a response to a HEAD request. + // - The content length header is not present. + bool enable_trailers = 5; +} + +// [#next-free-field: 14] +message Http2ProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http2ProtocolOptions"; + + // Defines a parameter to be sent in the SETTINGS frame. + // See `RFC7540, sec. 6.5.1 `_ for details. + message SettingsParameter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter"; + + // The 16 bit parameter identifier. + google.protobuf.UInt32Value identifier = 1 [ + (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).message = {required: true} + ]; + + // The 32 bit parameter value. + google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; + } + + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header + // compression. + google.protobuf.UInt32Value hpack_table_size = 1; + + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) + // and defaults to 2147483647. 
+ // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). + google.protobuf.UInt32Value max_concurrent_streams = 2 + [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; + + // `Initial stream-level flow-control window + // `_ size. Valid values range from 65535 + // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + // (256 * 1024 * 1024). + // + // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default + // window size now, so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. + google.protobuf.UInt32Value initial_stream_window_size = 3 + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; + + // Similar to *initial_stream_window_size*, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. + google.protobuf.UInt32Value initial_connection_window_size = 4 + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; + + // Allows proxying Websocket and other upgrades over H2 connect. + bool allow_connect = 5; + + // [#not-implemented-hide:] Hiding until envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows metadata. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more + // information. + bool allow_metadata = 6; + + // Limit the number of pending outbound downstream frames of all types (frames that are waiting to + // be written into the socket). 
Exceeding this limit triggers flood mitigation and connection is + // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due + // to flood mitigation. The default limit is 10000. + // [#comment:TODO: implement same limits for upstream outbound frames as well.] + google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, + // preventing high memory utilization when receiving continuous stream of these frames. Exceeding + // this limit triggers flood mitigation and connection is terminated. The + // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood + // mitigation. The default limit is 1000. + // [#comment:TODO: implement same limits for upstream outbound frames as well.] + google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an + // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but + // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` + // stat tracks the number of connections terminated due to flood mitigation. + // Setting this to 0 will terminate connection upon receiving first frame with an empty payload + // and no end stream flag. The default limit is 1. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; + + // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number + // of PRIORITY frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // max_inbound_priority_frames_per_stream * (1 + inbound_streams) + // + // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 100. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; + + // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number + // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // 1 + 2 * (inbound_streams + + // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) + // + // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 10. + // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, + // but more complex implementations that try to estimate available bandwidth require at least 2. + // [#comment:TODO: implement same limits for upstream inbound frames as well.] + google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 + [(validate.rules).uint32 = {gte: 1}]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // See `RFC7540, sec. 8.1 `_ for details. + bool stream_error_on_invalid_http_messaging = 12; + + // [#not-implemented-hide:] + // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: + // + // 1. 
SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by + // Envoy. + // + // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field + // 'allow_connect'. + // + // Note that custom parameters specified through this field can not also be set in the + // corresponding named parameters: + // + // .. code-block:: text + // + // ID Field Name + // ---------------- + // 0x1 hpack_table_size + // 0x3 max_concurrent_streams + // 0x4 initial_stream_window_size + // + // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies + // between custom parameters with the same identifier will trigger a failure. + // + // See `IANA HTTP/2 Settings + // `_ for + // standardized identifiers. + repeated SettingsParameter custom_settings_parameters = 13; +} + +// [#not-implemented-hide:] +message GrpcProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcProtocolOptions"; + + Http2ProtocolOptions http2_protocol_options = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto b/generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto new file mode 100644 index 000000000000..7dac394a865d --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "SocketOptionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Socket Option ] + +// Generic socket option message. 
This would be used to set socket options that +// might not exist in upstream kernels or precompiled Envoy binaries. +// [#next-free-field: 7] +message SocketOption { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketOption"; + + enum SocketState { + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + + // An optional name to give this socket option for debugging, etc. + // Uniqueness is not required and no special meaning is assumed. + string description = 1; + + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + int64 level = 2; + + // The numeric name as passed to setsockopt + int64 name = 3; + + oneof value { + option (validate.required) = true; + + // Because many sockopts take an int value. + int64 int_value = 4; + + // Otherwise it's a byte buffer. + bytes buf_value = 5; + } + + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. 
+ SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto index a424d778732b..a65db5e7d7d8 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto @@ -9,13 +9,14 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Endpoint configuration] // Endpoint discovery :ref:`architecture overview ` @@ -95,14 +96,6 @@ message ClusterLoadAssignment { // Defaults to 0 which means endpoints never go stale. google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; - // The flag to disable overprovisioning. If it is set to true, - // :ref:`overprovisioning factor - // ` will be ignored - // and Envoy will not perform graceful failover between priority levels or - // localities as endpoints become unhealthy. Otherwise Envoy will perform - // graceful failover as :ref:`overprovisioning factor - // ` suggests. 
- // [#not-implemented-hide:] bool hidden_envoy_deprecated_disable_overprovisioning = 5 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto index 2bb219151efd..60df915f2a9f 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto @@ -8,13 +8,14 @@ import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; option java_outer_classname = "EndpointComponentsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Endpoints] @@ -34,6 +35,13 @@ message Endpoint { // check port. Setting this with a non-zero value allows an upstream host // to have different health check address port. uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + string hostname = 2; } // The upstream host address. @@ -55,6 +63,12 @@ message Endpoint { // This takes into effect only for upstream clusters with // :ref:`active health checking ` enabled. HealthCheckConfig health_check_config = 2; + + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. 
+ string hostname = 3; } // An Endpoint that Envoy can route traffic to. @@ -62,35 +76,35 @@ message Endpoint { message LbEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint"; + core.v3.HealthStatus health_status = 2; + + // [#not-implemented-hide:] + core.v3.Metadata metadata = 3; + + // Optional health status when known and supplied by EDS server. + google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; + // Upstream host identifier or a named reference. oneof host_identifier { + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as *envoy.lb*. An example boolean key-value pair + // is *canary*, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. Endpoint endpoint = 1; - // [#not-implemented-hide:] + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, each host is presumed to have equal + // weight in a locality. string endpoint_name = 5; } - - // Optional health status when known and supplied by EDS server. - core.v3.HealthStatus health_status = 2; - - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. 
An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. - core.v3.Metadata metadata = 3; - - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; } // A group of endpoints belonging to a Locality. diff --git a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto index 2f0454d94320..01eb7b12cf1a 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto @@ -8,13 +8,14 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // These are stats Envoy reports to GLB every so often. 
Report frequency is // defined by diff --git a/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto b/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto index 8a525dee9108..25d27bfbd106 100644 --- a/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto @@ -10,12 +10,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.accesslog.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common access log types] diff --git a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto index 19f7697a68f1..2e35bb7f7c5b 100644 --- a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto +++ b/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.dubbo.router.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.router.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Router] // Dubbo router :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto b/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto index d0d12c07a64d..016140d10f84 100644 --- a/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto +++ b/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto @@ -8,12 +8,14 @@ import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.fault.v2"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.common.fault.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common fault injection types] diff --git a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto index 98465ab97336..bd9da5a67766 100644 --- a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto +++ b/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto @@ -10,6 +10,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha"; @@ -17,6 +18,7 @@ option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.adaptive_concurrency.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // 
[#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview diff --git a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto b/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto index cd9e1d30e887..43823286286a 100644 --- a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto +++ b/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto @@ -2,9 +2,8 @@ syntax = "proto3"; package envoy.config.filter.http.aws_lambda.v2alpha; -import "udpa/annotations/status.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha"; @@ -13,6 +12,7 @@ option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.aws_lambda.v3"; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AWS Lambda] // AWS Lambda :ref:`configuration overview `. @@ -20,6 +20,17 @@ option (udpa.annotations.file_status).work_in_progress = true; // AWS Lambda filter config message Config { + enum InvocationMode { + // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In + // this mode the output of the Lambda function becomes the response of the HTTP request. + SYNCHRONOUS = 0; + + // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be + // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the + // call which is translated to an HTTP 200 OK by the filter. 
+ ASYNCHRONOUS = 1; + } + // The ARN of the AWS Lambda to invoke when the filter is engaged // Must be in the following format: // arn::lambda:::function: @@ -27,6 +38,9 @@ message Config { // Whether to transform the request (headers and body) to a JSON payload or pass it as is. bool payload_passthrough = 2; + + // Determines the way to invoke the Lambda function. + InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; } // Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different diff --git a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto b/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto index 40e0bd9fcc69..5ebb92c01dfa 100644 --- a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto +++ b/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.aws_request_signing.v2alpha; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha"; @@ -10,6 +11,7 @@ option java_outer_classname = "AwsRequestSigningProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.aws_request_signing.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AwsRequestSigning] // AwsRequestSigning :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto b/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto index 00e0116a926d..56961d22fe09 100644 --- a/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto @@ -5,12 +5,14 @@ package envoy.config.filter.http.buffer.v2; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; option java_outer_classname = "BufferProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.buffer.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Buffer] // Buffer :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto index 4005b32a55c4..a9e51cf56a10 100644 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -7,9 +7,8 @@ import "envoy/type/matcher/string.proto"; import "google/protobuf/any.proto"; -import "udpa/annotations/status.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.cache.v2alpha"; @@ -18,6 +17,7 @@ option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cache.v3alpha"; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Cache Filter] // [#extension: 
envoy.filters.http.cache] diff --git a/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto b/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto index 54814f982073..d62d0d7a42fa 100644 --- a/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto +++ b/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto @@ -7,12 +7,14 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.compressor.v2"; option java_outer_classname = "CompressorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.compressor.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Compressor] diff --git a/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto b/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto index 73c9efc62544..9060a9c38fda 100644 --- a/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto +++ b/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.config.filter.http.cors.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.cors.v2"; option java_outer_classname = "CorsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cors.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Cors] // CORS Filter :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto b/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto index ce38714bf45f..3c2c9110e9fe 100644 --- a/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto +++ b/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/string.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.csrf.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 17509a111415..29aa8380191b 100644 --- a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -5,6 +5,7 @@ package envoy.config.filter.http.dynamic_forward_proxy.v2alpha; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha"; @@ -12,6 +13,7 @@ option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = 
"envoy.extensions.filters.http.dynamic_forward_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy] diff --git a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto b/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto index 8de88a959209..011d22f768c8 100644 --- a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto +++ b/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.config.filter.http.dynamo.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamo.v2"; option java_outer_classname = "DynamoProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamo.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamo] // Dynamo :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index a67e4dd6087a..a407f4628d2e 100644 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -10,6 +10,7 @@ import "envoy/type/matcher/string.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; @@ -17,6 +18,7 @@ option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ext_authz.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto b/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto index 9ce49288076f..cb99b0d71bbd 100644 --- a/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto +++ b/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto @@ -9,12 +9,14 @@ import "envoy/type/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.fault.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto index d1ba0b628987..b4331dad5031 100644 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto +++ b/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_http1_bridge.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_http1_bridge.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC HTTP/1.1 Bridge] // gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto index 6869b316b5e2..8b916d327e19 100644 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ b/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] // gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD index ef3541ebcb1d..69168ad0cf24 100644 --- a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto index 
ccfb6e50dab7..7f6dd2ce4226 100644 --- a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto +++ b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto @@ -2,7 +2,12 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_stats.v2alpha; +import "envoy/api/v2/core/grpc_method_list.proto"; + +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha"; @@ -10,6 +15,7 @@ option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_stats.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. @@ -20,6 +26,33 @@ message FilterConfig { // If true, the filter maintains a filter state object with the request and response message // counts. bool emit_filter_state = 1; + + oneof per_method_stat_specifier { + // If set, specifies an allowlist of service/methods that will have individual stats + // emitted for them. Any call that does not match the allowlist will be counted + // in a stat with no method specifier: `cluster..grpc.*`. + api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2; + + // If set to true, emit stats for all service/method names. + // + // If set to false, emit stats for all service/message types to the same stats without including + // the service/method in the name, with prefix `cluster..grpc`. This can be useful if + // service/method granularity is not needed, or if each cluster only receives a single method. + // + // .. attention:: + // This option is only safe if all clients are trusted. 
If this option is enabled + // with untrusted clients, the clients could cause unbounded growth in the number of stats in + // Envoy, using unbounded memory and potentially slowing down stats pipelines. + // + // .. attention:: + // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the + // behavior will default to `stats_for_all_methods=true`. This default value is deprecated, + // and in a future release, if neither field is set, it will default to + // `stats_for_all_methods=false` in order to be safe by default. This behavior can be + // controlled with runtime override + // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. + google.protobuf.BoolValue stats_for_all_methods = 3; + } } // gRPC statistics filter state object in protobuf form. diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto b/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto index 42cd3a13f842..be23b4d87b58 100644 --- a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto +++ b/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.http.grpc_web.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_web.v2"; option java_outer_classname = "GrpcWebProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_web.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Web] // gRPC Web :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto b/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto index 85fe6fbdc1d1..f3601b612b02 100644 --- a/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto +++ b/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto @@ -7,12 +7,14 @@ import "envoy/config/filter/http/compressor/v2/compressor.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.gzip.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Gzip] // Gzip :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto index 0ef96f17cc21..30de69d98b1c 100644 --- a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ b/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.header_to_metadata.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2"; @@ -10,6 +11,7 @@ option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.header_to_metadata.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: 
Header-To-Metadata Filter] // @@ -69,7 +71,8 @@ message Config { // A Rule defines what metadata to apply when a header is present or missing. message Rule { // The header that triggers this rule — required. - string header = 1 [(validate.rules).string = {min_bytes: 1}]; + string header = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If the header is present, apply this metadata KeyValuePair. // diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto index e57cd0893112..d7f6da8c82d4 100644 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto @@ -9,6 +9,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; @@ -16,6 +17,7 @@ option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.health_check.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index 87582ab554f5..f99b18a12c71 100644 --- a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -5,6 +5,7 @@ package envoy.config.filter.http.ip_tagging.v2; import "envoy/api/v2/core/address.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2"; @@ -12,6 +13,7 @@ option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ip_tagging.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index 3f40f89cdbcc..07044f92201e 100644 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -10,6 +10,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha"; @@ -17,6 +18,7 @@ option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.jwt_authn.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto b/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto index c10cbe170cd4..068b5e255df5 100644 --- a/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto +++ b/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.http.lua.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2"; option java_outer_classname = "LuaProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.lua.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Lua] // Lua :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto b/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto index 2ace47716f13..74d0ee408aeb 100644 --- a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto +++ b/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.on_demand.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.on_demand.v2"; @@ -10,6 +11,7 @@ option java_outer_classname = "OnDemandProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.on_demand.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: OnDemand] // IP tagging :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto index 5f772436ad48..0baf49cebeef 100644 --- a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ b/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.original_src.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.original_src.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // 
[#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. diff --git a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index 027cc8e7af03..b9361476bcfd 100644 --- a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -7,6 +7,7 @@ import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2"; @@ -14,6 +15,7 @@ option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto index 5f3292d41d50..691f23036ba8 100644 --- a/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto +++ b/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto @@ -5,12 +5,14 @@ package envoy.config.filter.http.rbac.v2; import "envoy/config/rbac/v2/rbac.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.rbac.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto b/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto index a94641cf33a5..c95500cf8168 100644 --- a/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto +++ b/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto @@ -7,12 +7,14 @@ import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.router.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Router] // Router :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto b/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto index ae159423d86b..a7ae625d2ee3 100644 --- a/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto +++ b/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto @@ -6,12 +6,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2"; option java_outer_classname = "SquashProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.squash.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Squash] // Squash :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto index 840082448454..3f984cec0d6c 100644 --- a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto +++ b/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto @@ -5,12 +5,14 @@ package envoy.config.filter.http.tap.v2alpha; import "envoy/config/common/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.tap.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto b/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto index c7636652c520..ac6d7eefa78a 100644 --- a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.http.transcoder.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2"; @@ -10,6 +11,7 @@ option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_json_transcoder.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/http/wasm/v2/wasm.proto b/generated_api_shadow/envoy/config/filter/http/wasm/v2/wasm.proto deleted file mode 100644 index 001dda83a1ae..000000000000 --- a/generated_api_shadow/envoy/config/filter/http/wasm/v2/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.wasm.v2; - -import "envoy/config/wasm/v2/wasm.proto"; - -import "udpa/annotations/migrate.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.wasm.v2"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.wasm.v3"; - -// [#protodoc-title: Wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. 
- config.wasm.v2.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto b/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto index 0e87ff300968..0496207e09bc 100644 --- a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto +++ b/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.http_inspector.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.http_inspector.v2"; option java_outer_classname = "HttpInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.http_inspector.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Inspector Filter] // Detect whether the application protocol is HTTP. 
diff --git a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto b/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto index ab210ad4805d..fa4acee45fc1 100644 --- a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto +++ b/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.original_dst.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_dst.v2"; option java_outer_classname = "OriginalDstProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.original_dst.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Dst Filter] // Use the Original destination address on downstream connections. 
diff --git a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto index 227a8a2572d2..1959698fd100 100644 --- a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ b/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.listener.original_src.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.original_src.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. 
diff --git a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto b/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto index 4749434f98dc..cabffb9fc0c0 100644 --- a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto +++ b/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.proxy_protocol.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2"; option java_outer_classname = "ProxyProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.proxy_protocol.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Proxy Protocol Filter] // PROXY protocol listener filter. 
diff --git a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto b/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto index fbdcc74a2a5b..7ab679c47dc5 100644 --- a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto +++ b/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.listener.tls_inspector.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2"; option java_outer_classname = "TlsInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.tls_inspector.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TLS Inspector Filter] // Allows detecting whether the transport appears to be TLS or plaintext. 
diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index 68ef0c91a82d..d1f459078f20 100644 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -7,6 +7,7 @@ import "envoy/api/v2/core/address.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; @@ -14,6 +15,7 @@ option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.client_ssl_auth.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Client TLS authentication] // Client TLS authentication diff --git a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto b/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto index 758145d6480f..15de7e3b5537 100644 --- a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto +++ b/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto @@ -5,12 +5,14 @@ package envoy.config.filter.network.direct_response.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.direct_response.v2"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.direct_response.v3"; +option 
(udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Direct response] // Direct response :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index 3ede3eca22d4..47248932f94c 100644 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -7,6 +7,7 @@ import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; @@ -14,6 +15,7 @@ option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index a760309a160a..9af461e3577c 100644 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -7,6 +7,7 @@ import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; @@ -14,6 +15,7 @@ option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto b/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto index bbf554b98898..2b51ce4e18c3 100644 --- a/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto +++ b/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.config.filter.network.echo.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.echo.v2"; option java_outer_classname = "EchoProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.echo.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Echo] // Echo :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index 8d31231a3de1..40cea7061868 100644 --- a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ b/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -5,6 +5,7 @@ package envoy.config.filter.network.ext_authz.v2; import "envoy/api/v2/core/grpc_service.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2"; @@ -12,6 +13,7 @@ option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.ext_authz.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 35fd122c06b1..3b4c29066e9a 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -18,6 +18,7 @@ import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; @@ -25,12 +26,13 @@ option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files 
= true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.http_connection_manager.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 36] +// [#next-free-field: 37] message HttpConnectionManager { enum CodecType { // For every new connection, the connection manager will determine which @@ -334,7 +336,7 @@ message HttpConnectionManager { // timeout, although per-route idle timeout overrides will continue to apply. google.protobuf.Duration stream_idle_timeout = 24; - // A timeout for idle requests managed by the connection manager. + // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. @@ -491,6 +493,18 @@ message HttpConnectionManager { // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of // `HTTP spec ` and is provided for convenience. bool merge_slashes = 33; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. 
+ RequestIDExtension request_id_extension = 36; } message Rds { @@ -640,3 +654,8 @@ message HttpFilter { google.protobuf.Any typed_config = 4; } } + +message RequestIDExtension { + // Request ID extension specific configuration. + google.protobuf.Any typed_config = 1; +} diff --git a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto b/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto index 7529bab63f31..ea2f60e71eed 100644 --- a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto +++ b/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.network.kafka_broker.v2alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "KafkaBrokerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.kafka_broker.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto b/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto index 73731971db62..791b767f3e6a 100644 --- a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto +++ b/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto @@ -6,6 +6,7 @@ import "envoy/api/v2/core/base.proto"; import "envoy/type/token_bucket.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha"; @@ -13,6 +14,7 @@ option java_outer_classname = "LocalRateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.local_ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Local rate limit] // Local rate limit :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto index 59dbb526e757..b261897858e2 100644 --- a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -5,6 +5,7 @@ package envoy.config.filter.network.mongo_proxy.v2; import "envoy/config/filter/fault/v2/fault.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2"; @@ -12,6 +13,7 @@ option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.mongo_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto index ea63da925fcd..78c6b7e971df 100644 --- a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.filter.network.mysql_proxy.v1alpha1; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1"; @@ -10,6 +11,7 @@ option java_outer_classname = "MysqlProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.mysql_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: MySQL proxy] // MySQL Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index b50da02c2068..aed56c9af629 100644 --- a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -8,6 +8,7 @@ import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2"; @@ -15,6 +16,7 @@ option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto index 8700fa835b0e..ce86794c71cc 100644 --- a/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto +++ b/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto @@ -5,12 +5,14 @@ package envoy.config.filter.network.rbac.v2; import "envoy/config/rbac/v2/rbac.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.rbac.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index e5ea0dd0362f..caca630fd297 100644 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -7,10 +7,10 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/sensitive.proto"; - import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2"; @@ -18,6 +18,7 @@ option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package 
= "envoy.extensions.filters.network.redis_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto b/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto index 1b5ac94af14d..71c161fc48f6 100644 --- a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto +++ b/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.filter.network.sni_cluster.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.sni_cluster.v2"; option java_outer_classname = "SniClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.sni_cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: SNI Cluster Filter] // Set the upstream cluster name from the SNI field in the TLS connection. 
diff --git a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 053c1348e921..4ec68f320eed 100644 --- a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -11,6 +11,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; @@ -18,6 +19,7 @@ option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.tcp_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto index 669faae7bf64..8230a52e341e 100644 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto @@ -8,6 +8,7 @@ import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; @@ -15,6 +16,7 @@ option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index 91ac5da3ef28..96e750ef310d 100644 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; @@ -15,6 +16,7 @@ option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/network/wasm/v2/wasm.proto b/generated_api_shadow/envoy/config/filter/network/wasm/v2/wasm.proto deleted file mode 100644 index 34ce8bc12ec8..000000000000 --- a/generated_api_shadow/envoy/config/filter/network/wasm/v2/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.wasm.v2; - -import "envoy/config/wasm/v2/wasm.proto"; - -import "udpa/annotations/migrate.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.wasm.v2"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.wasm.v3"; - -// [#protodoc-title: Wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. 
- config.wasm.v2.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto index b802bbb04b46..cae622cecc34 100644 --- a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto @@ -5,6 +5,7 @@ package envoy.config.filter.network.zookeeper_proxy.v1alpha1; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; @@ -12,6 +13,7 @@ option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.zookeeper_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ZooKeeper proxy] // ZooKeeper Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index 7004b882c5a7..389ddf35990e 100644 --- a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ -7,6 +7,7 @@ import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1"; @@ -14,6 +15,7 @@ option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD +++ b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto index 8661675ce364..5463ab6513be 100644 --- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto +++ b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.filter.thrift.router.v2alpha1; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Thrift router :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD new file mode 100644 index 000000000000..c6f01577c828 --- /dev/null +++ b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/api/v2/core:pkg", + "//envoy/data/dns/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto new file mode 100644 index 000000000000..de2608d44306 --- /dev/null +++ b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.config.filter.udp.dns_filter.v2alpha; + +import "envoy/api/v2/core/base.proto"; +import "envoy/data/dns/v2alpha/dns_table.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.filter.udp.dns_filter.v2alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filter.udp.dns_filter.v3alpha"; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + // This message contains the configuration for the Dns Filter operating + // in a server context. This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v2alpha.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. 
This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + api.v2.core.DataSource external_dns_table = 2; + } + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration + ServerContextConfig server_config = 2; +} diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto index 720277aa4ccc..5079c1f0df48 100644 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto @@ -4,11 +4,13 @@ package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto index 9f17b40554be..b63d35af4018 100644 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.grpc_credential.v2alpha; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_outer_classname = "AwsIamProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto index c5c6a4d980b9..41e67f0bf24b 100644 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto @@ -5,10 +5,12 @@ package envoy.config.grpc_credential.v2alpha; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_outer_classname = "FileBasedMetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto index a49436774b2f..eeb5d93ec689 100644 --- 
a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto +++ b/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.config.grpc_credential.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; option java_outer_classname = "AwsIamProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto index bee16939d7e8..b364d2917099 100644 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto +++ b/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto @@ -5,11 +5,13 @@ package envoy.config.grpc_credential.v3; import "envoy/config/core/v3/base.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; option java_outer_classname = "FileBasedMetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin diff --git a/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD b/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD +++ b/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD @@ -4,4 +4,6 @@ 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto b/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto index 3f7e15d80d02..0c569f5c75e8 100644 --- a/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto +++ b/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.health_checker.redis.v2; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2"; option java_outer_classname = "RedisProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Redis] // Redis health checker :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/config/listener/v2/api_listener.proto b/generated_api_shadow/envoy/config/listener/v2/api_listener.proto index 3f974cad9e68..6709d5fe0b52 100644 --- a/generated_api_shadow/envoy/config/listener/v2/api_listener.proto +++ b/generated_api_shadow/envoy/config/listener/v2/api_listener.proto @@ -5,11 +5,13 @@ package envoy.config.listener.v2; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v2"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: API listener] diff --git a/generated_api_shadow/envoy/config/listener/v3/BUILD b/generated_api_shadow/envoy/config/listener/v3/BUILD index 0813c2cbcac1..e67314794940 100644 --- 
a/generated_api_shadow/envoy/config/listener/v3/BUILD +++ b/generated_api_shadow/envoy/config/listener/v3/BUILD @@ -8,6 +8,7 @@ api_proto_package( deps = [ "//envoy/api/v2:pkg", "//envoy/api/v2/listener:pkg", + "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/listener/v2:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", diff --git a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto b/generated_api_shadow/envoy/config/listener/v3/api_listener.proto index c8c3ea115963..4d3879a22b1d 100644 --- a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/api_listener.proto @@ -4,11 +4,13 @@ package envoy.config.listener.v3; import "google/protobuf/any.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: API listener] diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index d8b15abc22f1..2b4ecb826d86 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.listener.v3; +import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/socket_option.proto"; @@ -13,18 +14,19 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` -// [#next-free-field: 22] +// [#next-free-field: 23] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -102,23 +104,6 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - // - // .. attention:: - // - // This field is deprecated. Use :ref:`an original_dst ` - // :ref:`listener filter ` instead. - // - // Note that hand off to another listener is *NOT* performed without this flag. Once - // :ref:`FilterChainMatch ` is implemented this flag - // will be removed, as filter chain matching can be used to select a filter chain based on the - // restored destination address. - google.protobuf.BoolValue hidden_envoy_deprecated_use_original_dst = 4 [deprecated = true]; - // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; @@ -247,4 +232,10 @@ message Listener { // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. 
+ repeated accesslog.v3.AccessLog access_log = 22; + + google.protobuf.BoolValue hidden_envoy_deprecated_use_original_dst = 4 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index efbe0cee68be..25d39e24620e 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -11,13 +11,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ListenerComponentsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener components] // Listener :ref:`configuration overview ` @@ -34,9 +35,9 @@ message Filter { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 4; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -174,15 +175,6 @@ message FilterChain { // The criteria to use when matching a connection to this filter chain. FilterChainMatch filter_chain_match = 1; - // The TLS context for this filter chain. - // - // .. attention:: - // - // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - // set, `transport_socket` takes priority. 
- envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - hidden_envoy_deprecated_tls_context = 2 [deprecated = true]; - // A list of individual network filters that make up the filter chain for // connections established with the listener. Order matters as the filters are // processed sequentially as connection events happen. Note: If the filter @@ -211,11 +203,37 @@ message FilterChain { // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter // chain is to be dynamically updated or removed via FCDS a unique name must be provided. string name = 7; + + envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + hidden_envoy_deprecated_tls_context = 2 [deprecated = true]; } -// [#not-implemented-hide:] // Listener filter chain match configuration. This is a recursive structure which allows complex // nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// // [#next-free-field: 6] message ListenerFilterChainMatchPredicate { option (udpa.annotations.versioning).previous_message_type = @@ -262,25 +280,16 @@ message ListenerFilter { // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_bytes: 1}]; + ListenerFilterChainMatchPredicate filter_disabled = 4; + // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - + // Optional match predicate used to disable the filter. 
The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. google.protobuf.Any typed_config = 3; - } - // [#not-implemented-hide:] - // Decide when to disable this listener filter on incoming traffic. - // Example: - // 0. always enable filter - // don't set `filter_disabled` - // 1. disable when the destination port is 3306 - // rule.destination_port_range = Int32Range {start = 3306, end = 3307} - // 2. disable when the destination port is 3306 or 15000 - // rule.or_match = MatchSet.rules [ - // rule.destination_port_range = Int32Range {start = 3306, end = 3307}, - // rule.destination_port_range = Int32Range {start = 15000, end = 15001}, - // ] - ListenerFilterChainMatchPredicate filter_disabled = 4; + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; + } } diff --git a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto index 76345d2973cc..9949da2e0d70 100644 --- a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto +++ b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto @@ -5,11 +5,13 @@ package envoy.config.listener.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto index 7dba41df8d3b..7d9bef4b6769 100644 --- a/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto +++ 
b/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto @@ -5,11 +5,13 @@ package envoy.config.listener.v3; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: UDP Listener Config] // Listener :ref:`configuration overview ` @@ -26,9 +28,9 @@ message UdpListenerConfig { // Used to create a specific listener factory. To some factory, e.g. // "raw_udp_listener", config is not needed. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/config/metrics/v2/BUILD b/generated_api_shadow/envoy/config/metrics/v2/BUILD index a0eac27f8a5d..94999290bca3 100644 --- a/generated_api_shadow/envoy/config/metrics/v2/BUILD +++ b/generated_api_shadow/envoy/config/metrics/v2/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto index d2f60a6a67a7..f1f8662f0750 100644 --- a/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto @@ -4,11 +4,13 @@ package envoy.config.metrics.v2; import "envoy/api/v2/core/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v2"; option 
java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics service] diff --git a/generated_api_shadow/envoy/config/metrics/v2/stats.proto b/generated_api_shadow/envoy/config/metrics/v2/stats.proto index d3f797543a42..c6113bf5a5d3 100644 --- a/generated_api_shadow/envoy/config/metrics/v2/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v2/stats.proto @@ -9,11 +9,13 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v2"; option java_outer_classname = "StatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Stats] // Statistics :ref:`architecture overview `. diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto index 002aa7482e7d..ad9879055ba3 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto @@ -4,13 +4,14 @@ package envoy.config.metrics.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v3"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics service] diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index fa734b6cdd13..bd5e0e8c4973 100644 --- 
a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -9,13 +9,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v3"; option java_outer_classname = "StatsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Stats] // Statistics :ref:`architecture overview `. @@ -38,9 +39,9 @@ message StatsSink { // Stats sink specific configuration which depends on the sink being instantiated. See // :ref:`StatsdSink ` for an example. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -262,47 +263,47 @@ message TagSpecifier { message StatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink"; + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + string prefix = 3; + oneof statsd_specifier { option (validate.required) = true; - // The UDP address of a running `statsd `_ - // compliant listener. If specified, statistics will be flushed to this - // address. - core.v3.Address address = 1; - // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. + core.v3.Address address = 1; + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. 
code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms string tcp_cluster_name = 2; } - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. @@ -316,17 +317,17 @@ message DogStatsdSink { reserved 2; + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + string prefix = 3; + oneof dog_statsd_specifier { option (validate.required) = true; - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. core.v3.Address address = 1; } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. 
diff --git a/generated_api_shadow/envoy/config/overload/v2alpha/BUILD b/generated_api_shadow/envoy/config/overload/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/overload/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/overload/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto b/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto index ff71a1e5dcca..03886cdee6d6 100644 --- a/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto +++ b/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto @@ -6,11 +6,13 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.overload.v2alpha"; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Overload Manager] diff --git a/generated_api_shadow/envoy/config/overload/v3/overload.proto b/generated_api_shadow/envoy/config/overload/v3/overload.proto index 6f86362cc6af..337150657b14 100644 --- a/generated_api_shadow/envoy/config/overload/v3/overload.proto +++ b/generated_api_shadow/envoy/config/overload/v3/overload.proto @@ -6,13 +6,14 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.overload.v3"; option java_outer_classname = "OverloadProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Overload Manager] @@ -36,9 +37,9 @@ message ResourceMonitor { // Configuration for the resource monitor being instantiated. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/config/ratelimit/v2/BUILD b/generated_api_shadow/envoy/config/ratelimit/v2/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/generated_api_shadow/envoy/config/ratelimit/v2/BUILD +++ b/generated_api_shadow/envoy/config/ratelimit/v2/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto index e617ec22f5de..92801ea7b968 100644 --- a/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto +++ b/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto @@ -4,11 +4,13 @@ package envoy.config.ratelimit.v2; import "envoy/api/v2/core/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.ratelimit.v2"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit service] diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto index efd056711a96..bb3c538bbabf 100644 --- a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto +++ 
b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto @@ -4,13 +4,14 @@ package envoy.config.ratelimit.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.ratelimit.v3"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit service] diff --git a/generated_api_shadow/envoy/config/rbac/v2/BUILD b/generated_api_shadow/envoy/config/rbac/v2/BUILD index 064f8f79e0b8..d5b5dda169a0 100644 --- a/generated_api_shadow/envoy/config/rbac/v2/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v2/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "//envoy/type/matcher:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/generated_api_shadow/envoy/config/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/rbac/v2/rbac.proto index fa5d27fdf673..943ac33e0859 100644 --- a/generated_api_shadow/envoy/config/rbac/v2/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v2/rbac.proto @@ -10,11 +10,13 @@ import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -168,7 +170,7 @@ message Permission { } // Principal defines an identity or a group of identities for a downstream subject. 
-// [#next-free-field: 10] +// [#next-free-field: 12] message Principal { // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. @@ -202,7 +204,21 @@ message Principal { Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. - api.v2.core.CidrRange source_ip = 5; + // This address will honor proxy protocol, but will not honor XFF. + api.v2.core.CidrRange source_ip = 5 [deprecated = true]; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is inferred + // from for example the x-forwarder-for header, proxy protocol, etc. + api.v2.core.CidrRange direct_remote_ip = 10; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip `. + // E.g, if the remote ip is inferred from for example the x-forwarder-for header, + // proxy protocol, etc. + api.v2.core.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. 
diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto index 0d9b552d85cc..040f537d1f5c 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto @@ -10,13 +10,14 @@ import "envoy/type/matcher/v3/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -179,7 +180,7 @@ message Permission { } // Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 10] +// [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; @@ -221,7 +222,21 @@ message Principal { Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. - core.v3.CidrRange source_ip = 5; + // This address will honor proxy protocol, but will not honor XFF. + core.v3.CidrRange source_ip = 5 [deprecated = true]; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is inferred + // from for example the x-forwarder-for header, proxy protocol, etc. + core.v3.CidrRange direct_remote_ip = 10; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip `. + // E.g, if the remote ip is inferred from for example the x-forwarder-for header, + // proxy protocol, etc. 
+ core.v3.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD new file mode 100644 index 000000000000..dbfa8be4f36f --- /dev/null +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/rbac/v3:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", + ], +) diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto new file mode 100644 index 000000000000..cdbeb5bf2eef --- /dev/null +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto @@ -0,0 +1,258 @@ +syntax = "proto3"; + +package envoy.config.rbac.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v3/metadata.proto"; +import "envoy/type/matcher/v3/path.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/api/expr/v1alpha1/syntax.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.rbac.v4alpha"; +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Role Based Access Control (RBAC)] + +// Role Based Access Control (RBAC) 
provides service-level and method-level access control for a +// service. RBAC policies are additive. The policies are examined in order. A request is allowed +// once a matching policy is found (suppose the `action` is ALLOW). +// +// Here is an example of RBAC configuration. It has two policies: +// +// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so +// does "cluster.local/ns/default/sa/superuser". +// +// * Any user can read ("GET") the service at paths with prefix "/products", so long as the +// destination port is either 80 or 443. +// +// .. code-block:: yaml +// +// action: ALLOW +// policies: +// "service-admin": +// permissions: +// - any: true +// principals: +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/admin" +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/superuser" +// "product-viewer": +// permissions: +// - and_rules: +// rules: +// - header: { name: ":method", exact_match: "GET" } +// - url_path: +// path: { prefix: "/products" } +// - or_rules: +// rules: +// - destination_port: 80 +// - destination_port: 443 +// principals: +// - any: true +// +message RBAC { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.RBAC"; + + // Should we do safe-list or block-list style access control? + enum Action { + // The policies grant access to principals. The rest is denied. This is safe-list style + // access control. This is the default type. + ALLOW = 0; + + // The policies deny access to principals. The rest is allowed. This is block-list style + // access control. + DENY = 1; + } + + // The action to take if a policy matches. The request is allowed if and only if: + // + // * `action` is "ALLOWED" and at least one policy matches + // * `action` is "DENY" and none of the policies match + Action action = 1; + + // Maps from policy name to policy. A match occurs when at least one policy matches the request. 
+ map<string, Policy> policies = 2; +} + +// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if +// and only if at least one of its permissions match the action taking place AND at least one of its +// principals match the downstream AND the condition is true if specified. +message Policy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; + + // Required. The set of permissions that define a role. Each permission is matched with OR + // semantics. To match all actions for this policy, a single Permission with the `any` field set + // to true should be used. + repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Required. The set of principals that are assigned/denied the role based on “action”. Each + // principal is matched with OR semantics. To match all downstreams for this policy, a single + // Principal with the `any` field set to true should be used. + repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; + + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + google.api.expr.v1alpha1.Expr condition = 3; +} + +// Permission defines an action (or actions) that a principal can take. +// [#next-free-field: 11] +message Permission { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Permission"; + + // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, + // each are applied with the associated behavior. 
+ message Set { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.rbac.v3.Permission.Set"; + + repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + oneof rule { + option (validate.required) = true; + + // A set of rules that all must match in order to define the action. + Set and_rules = 1; + + // A set of rules where at least one must match in order to define the action. + Set or_rules = 2; + + // When any is set, it matches any action. + bool any = 3 [(validate.rules).bool = {const: true}]; + + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. + route.v4alpha.HeaderMatcher header = 4; + + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.v3.PathMatcher url_path = 10; + + // A CIDR block that describes the destination IP. + core.v4alpha.CidrRange destination_ip = 5; + + // A port number that describes the destination port connecting to. + uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; + + // Metadata that describes additional information about the action. + type.matcher.v3.MetadataMatcher metadata = 7; + + // Negates matching the provided permission. For instance, if the value of `not_rule` would + // match, this permission would not match. Conversely, if the value of `not_rule` would not + // match, this permission would match. + Permission not_rule = 8; + + // The request server from the client's connection request. This is + // typically TLS SNI. + // + // .. attention:: + // + // The behavior of this field may be affected by how Envoy is configured + // as explained below. 
+ // + // * If the :ref:`TLS Inspector ` + // filter is not added, and if a `FilterChainMatch` is not defined for + // the :ref:`server name `, + // a TLS connection's requested SNI server name will be treated as if it + // wasn't present. + // + // * A :ref:`listener filter ` may + // overwrite a connection's requested server name within Envoy. + // + // Please refer to :ref:`this FAQ entry ` to learn to + // setup SNI. + type.matcher.v3.StringMatcher requested_server_name = 9; + } +} + +// Principal defines an identity or a group of identities for a downstream subject. +// [#next-free-field: 12] +message Principal { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; + + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, + // each are applied with the associated behavior. + message Set { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.rbac.v3.Principal.Set"; + + repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Authentication attributes for a downstream. + message Authenticated { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.rbac.v3.Principal.Authenticated"; + + reserved 1; + + // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the + // certificate, otherwise the subject field is used. If unset, it applies to any user that is + // authenticated. + type.matcher.v3.StringMatcher principal_name = 2; + } + + oneof identifier { + option (validate.required) = true; + + // A set of identifiers that all must match in order to define the downstream. + Set and_ids = 1; + + // A set of identifiers at least one must match in order to define the downstream. + Set or_ids = 2; + + // When any is set, it matches any downstream. + bool any = 3 [(validate.rules).bool = {const: true}]; + + // Authenticated attributes that identify the downstream. 
+ Authenticated authenticated = 4; + + // A CIDR block that describes the downstream IP. + // This address will honor proxy protocol, but will not honor XFF. + core.v4alpha.CidrRange hidden_envoy_deprecated_source_ip = 5 [deprecated = true]; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is inferred + // from for example the x-forwarder-for header, proxy protocol, etc. + core.v4alpha.CidrRange direct_remote_ip = 10; + + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip `. + // E.g, if the remote ip is inferred from for example the x-forwarder-for header, + // proxy protocol, etc. + core.v4alpha.CidrRange remote_ip = 11; + + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // field if you want to match the URL path without the query and fragment string. + route.v4alpha.HeaderMatcher header = 6; + + // A URL path on the incoming HTTP request. Only available for HTTP. + type.matcher.v3.PathMatcher url_path = 9; + + // Metadata that describes additional information about the principal. + type.matcher.v3.MetadataMatcher metadata = 7; + + // Negates matching the provided principal. For instance, if the value of `not_id` would match, + // this principal would not match. Conversely, if the value of `not_id` would not match, this + // principal would match. 
+ Principal not_id = 8; + } +} diff --git a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto index 0ec5c9424edd..529622a071e7 100644 --- a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.resource_monitor.fixed_heap.v2alpha; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha"; option java_outer_classname = "FixedHeapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Fixed heap] // [#extension: envoy.resource_monitors.fixed_heap] diff --git a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", 
"api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto index 1073e16b1b6e..a9f056d2d29a 100644 --- a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.resource_monitor.injected_resource.v2alpha; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha"; option java_outer_classname = "InjectedResourceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Injected resource] // [#extension: envoy.resource_monitors.injected_resource] diff --git a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD +++ b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto index b4cf8c0cbcf2..107bf6fc2dbe 100644 --- 
a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto +++ b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.retry.omit_canary_hosts.v2; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2"; option java_outer_classname = "OmitCanaryHostsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Omit Canary Hosts Predicate] // [#extension: envoy.retry_host_predicates.omit_canary_hosts] diff --git a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto b/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto index 35bd5d00910c..d229cffef8ca 100644 --- a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto +++ b/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto @@ -5,12 +5,14 @@ package envoy.config.retry.omit_host_metadata.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.retry.omit_host_metadata.v2"; option java_outer_classname = "OmitHostMetadataConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.retry.host.omit_host_metadata.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Omit host metadata retry predicate] diff --git a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD +++ 
b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto index 75532397ff7c..e87e8cd70eaf 100644 --- a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto +++ b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.config.retry.previous_hosts.v2; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.config.retry.previous_hosts.v2"; option java_outer_classname = "PreviousHostsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Previous Hosts Predicate] // [#extension: envoy.retry_host_predicates.previous_hosts] diff --git a/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD b/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD +++ b/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto b/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto index 4e1703cd2529..e96741178576 100644 --- 
a/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto +++ b/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.config.retry.previous_priorities; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities"; option java_outer_classname = "PreviousPrioritiesConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Previous priorities retry selector] diff --git a/generated_api_shadow/envoy/config/route/v3/route.proto b/generated_api_shadow/envoy/config/route/v3/route.proto index 5a1c4204c9b0..a528d99bd448 100644 --- a/generated_api_shadow/envoy/config/route/v3/route.proto +++ b/generated_api_shadow/envoy/config/route/v3/route.proto @@ -8,13 +8,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP route configuration] // * Routing :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 86bc40458c69..8ef58ba20798 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -14,14 +14,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; 
- import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v3"; option java_outer_classname = "RouteComponentsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP route components] // * Routing :ref:`architecture overview ` @@ -123,14 +124,6 @@ message VirtualHost { // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map hidden_envoy_deprecated_per_filter_config = 12 - [deprecated = true]; - // The per_filter_config field can be used to provide virtual host-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter @@ -181,6 +174,9 @@ message VirtualHost { // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum // value of this and the listener per_connection_buffer_limit_bytes. google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; + + map hidden_envoy_deprecated_per_filter_config = 12 + [deprecated = true]; } // A filter-defined action type. @@ -209,48 +205,38 @@ message Route { // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; + // Route request to some upstream cluster. 
+ core.v3.Metadata metadata = 4; - // Return a redirect. - RedirectAction redirect = 3; + // Return a redirect. + Decorator decorator = 5; - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; + // Return an arbitrary HTTP response directly, without proxying. + map typed_per_filter_config = 13; - // [#not-implemented-hide:] - // If true, a filter will define the action (e.g., it could dynamically generate the - // RouteAction). - FilterAction filter_action = 17; - } + // [#not-implemented-hide:] + // If true, a filter will define the action (e.g., it could dynamically generate the + // RouteAction). + repeated core.v3.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; // The Metadata field can be used to provide additional information // about the route. It can be used for configuration, stats, and logging. // The metadata should go under the filter namespace that will need it. // For instance, if the metadata is intended for the Router filter, // the filter name should be specified as *envoy.filters.http.router*. - core.v3.Metadata metadata = 4; + repeated string request_headers_to_remove = 12; // Decorator for the matched route. - Decorator decorator = 5; - - // The per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true]; + repeated core.v3.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; // The typed_per_filter_config field can be used to provide route-specific // configurations for filters. 
The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` for // if and how it is utilized. - map typed_per_filter_config = 13; + repeated string response_headers_to_remove = 11; // Specifies a set of headers that will be added to requests matching this // route. Headers specified at this level are applied before headers from the @@ -258,12 +244,11 @@ message Route { // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; + Tracing tracing = 15; // Specifies a list of HTTP headers that should be removed from each request // matching this route. - repeated string request_headers_to_remove = 12; + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; // Specifies a set of headers that will be added to responses to requests // matching this route. Headers specified at this level are applied before @@ -271,21 +256,27 @@ message Route { // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including // details on header value syntax, see the documentation on // :ref:`custom request headers `. - repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; + map hidden_envoy_deprecated_per_filter_config = 8 + [deprecated = true]; - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11; + oneof action { + option (validate.required) = true; - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. 
- Tracing tracing = 15; + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + RouteAction route = 2; - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + RedirectAction redirect = 3; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + DirectResponseAction direct_response = 7; + + FilterAction filter_action = 17; + } } // Compared to the :ref:`cluster ` field that specifies a @@ -354,15 +345,10 @@ message WeightedCluster { // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` // for if and how it is utilized. + map typed_per_filter_config = 10; + map hidden_envoy_deprecated_per_filter_config = 8 [deprecated = true]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map typed_per_filter_config = 10; } // Specifies one or more upstream clusters associated with the route. @@ -407,57 +393,31 @@ message RouteMatch { reserved 5; - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. 
- string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. + google.protobuf.BoolValue case_sensitive = 4; - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. The regex grammar is defined `here - // `_. - // - // Examples: - // - // * The regex ``/b[io]t`` matches the path */bit* - // * The regex ``/b[io]t`` matches the path */bot* - // * The regex ``/b[io]t`` does not match the path */bite* - // * The regex ``/b[io]t`` does not match the path */bit/bot* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - string hidden_envoy_deprecated_regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + core.v3.RuntimeFractionalPercent runtime_fraction = 9; - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. 
- // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - } + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + repeated HeaderMatcher headers = 6; // Indicates that prefix/path matching should be case insensitive. The default // is true. - google.protobuf.BoolValue case_sensitive = 4; + repeated QueryParameterMatcher query_parameters = 7; // Indicates that the route should additionally match on a runtime key. 
Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by @@ -475,57 +435,48 @@ message RouteMatch { // integer with the assumption that the value is an integral percentage out of 100. For // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.v3.RuntimeFractionalPercent runtime_fraction = 9; + GrpcRouteMatchOptions grpc = 8; // Specifies a set of headers that the route should match on. The router will // check the request’s headers against all the specified headers in the route // config. A match will happen if all the headers in the route are present in // the request with the same values (or based on presence if the value field // is not in the config). - repeated HeaderMatcher headers = 6; + TlsContextMatchOptions tls_context = 11; - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; + oneof path_specifier { + option (validate.required) = true; - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - GrpcRouteMatchOptions grpc = 8; + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. 
+ string prefix = 1; - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + string path = 2; + + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + string hidden_envoy_deprecated_regex = 3 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + } } // [#next-free-field: 12] message CorsPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy"; - // Specifies the origins that will be allowed to do CORS requests. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match`. - repeated string hidden_envoy_deprecated_allow_origin = 1 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies regex patterns that match allowed origins. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for - // use with untrusted input in all cases. - repeated string hidden_envoy_deprecated_allow_origin_regex = 8 - [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; - // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. 
repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; @@ -545,25 +496,14 @@ message CorsPolicy { // Specifies whether the resource allows credentials. google.protobuf.BoolValue allow_credentials = 6; - oneof enabled_specifier { - // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`filter_enabled` field instead. - google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.v3.RuntimeFractionalPercent filter_enabled = 9; - } + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.v3.RuntimeFractionalPercent shadow_enabled = 10; // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not // enforced. @@ -574,7 +514,18 @@ message CorsPolicy { // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* to determine if it's valid but will not enforce any policies. 
- core.v3.RuntimeFractionalPercent shadow_enabled = 10; + repeated string hidden_envoy_deprecated_allow_origin = 1 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + repeated string hidden_envoy_deprecated_allow_origin_regex = 8 + [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; + + oneof enabled_specifier { + core.v3.RuntimeFractionalPercent filter_enabled = 9; + + google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + } } // [#next-free-field: 34] @@ -614,24 +565,6 @@ message RouteAction { // exist in the cluster manager configuration. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` - // field instead. Mirroring occurs if both this and - // ` - // are not set. - string hidden_envoy_deprecated_runtime_key = 2 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // If not specified, all requests to the target cluster will be mirrored. // // If specified, this field takes precedence over the `runtime_key` field and requests must also @@ -644,6 +577,9 @@ message RouteAction { // Determines if the trace span should be sampled. Defaults to true. 
google.protobuf.BoolValue trace_sampled = 4; + + string hidden_envoy_deprecated_runtime_key = 2 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer @@ -725,45 +661,45 @@ message RouteAction { string key = 1 [(validate.rules).string = {min_bytes: 1}]; } + // Header hash policy. + bool terminal = 4; + oneof policy_specifier { option (validate.required) = true; - // Header hash policy. + // Cookie hash policy. Header header = 1; - // Cookie hash policy. + // Connection properties hash policy. Cookie cookie = 2; - // Connection properties hash policy. + // Query parameter hash policy. ConnectionProperties connection_properties = 3; - // Query parameter hash policy. + // Filter state hash policy. QueryParameter query_parameter = 5; - // Filter state hash policy. + // The flag that short-circuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. FilterState filter_state = 6; } - - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. 
- // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; } // Allows enabling and disabling upgrades on a per-route basis. @@ -788,44 +724,40 @@ message RouteAction { reserved 12, 18, 19, 16, 22, 21; - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + // Indicates the upstream cluster to which the request should be routed + // to. + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. 
+ core.v3.Metadata metadata_match = 4; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - } + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered // for load balancing. If using :ref:`weighted_clusters // `, metadata will be merged, with values // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.v3.Metadata metadata_match = 4; + google.protobuf.Duration timeout = 8; // Indicates that during forwarding, the matched prefix (or path) should be // swapped with this value. This option allows application URLs to be rooted @@ -858,8 +790,7 @@ message RouteAction { // // Having above entries in the config, requests to */prefix* will be stripped to */*, while // requests to */prefix/etc* will be stripped to */etc*. 
- string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + google.protobuf.Duration idle_timeout = 24; // Indicates that during forwarding, portions of the path that match the // pattern should be rewritten, even allowing the substitution of capture @@ -889,32 +820,28 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + RetryPolicy retry_policy = 9; - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // Indicates that during forwarding, the host header will be swapped with + // this value. + google.protobuf.Any retry_policy_typed_config = 33; - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + repeated RequestMirrorPolicy request_mirror_policies = 30; - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. 
- // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - } + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; // Specifies the upstream timeout for the route. If not specified, the default is 15s. This // spans between the point at which the entire downstream request (i.e. end-of-stream) has been @@ -927,7 +854,7 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - google.protobuf.Duration timeout = 8; + repeated RateLimit rate_limits = 13; // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, // although the connection manager wide :ref:`stream_idle_timeout @@ -947,42 +874,35 @@ message RouteAction { // fires, the stream is terminated with a 408 Request Timeout error code if no // upstream response header has been received, otherwise a stream reset // occurs. - google.protobuf.Duration idle_timeout = 24; + google.protobuf.BoolValue include_vh_rate_limits = 14; // Indicates that the route has a retry policy. Note that if this is set, // it'll take precedence over the virtual host level retry policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
- RetryPolicy retry_policy = 9; + repeated HashPolicy hash_policy = 15; // [#not-implemented-hide:] // Specifies the configuration for retry policy extension. Note that if this is set, it'll take // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, // most internal one becomes the enforced policy). :ref:`Retry policy ` // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has a request mirroring policy. - // - // .. attention:: - // This field has been deprecated in favor of `request_mirror_policies` which supports one or - // more mirroring policies. - RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; + CorsPolicy cors = 17; // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; + google.protobuf.Duration max_grpc_timeout = 23; // Optionally specifies the :ref:`routing priority `. - core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + google.protobuf.Duration grpc_timeout_offset = 28; // Specifies a set of rate limit configurations that could be applied to the // route. - repeated RateLimit rate_limits = 13; + repeated UpgradeConfig upgrade_configs = 25; // Specifies if the rate limit filter should include the virtual host rate // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; + InternalRedirectAction internal_redirect_action = 26; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to @@ -996,10 +916,10 @@ message RouteAction { // backend). 
If a hash policy has the "terminal" attribute set to true, and // there is already a hash generated, the hash is returned immediately, // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; + google.protobuf.UInt32Value max_internal_redirects = 31; // Indicates that the route has a CORS policy. - CorsPolicy cors = 17; + HedgePolicy hedge_policy = 27; // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, @@ -1020,41 +940,52 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; + RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). - google.protobuf.Duration grpc_timeout_offset = 28; + oneof cluster_specifier { + option (validate.required) = true; - repeated UpgradeConfig upgrade_configs = 25; + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. 
This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - InternalRedirectAction internal_redirect_action = 26; + string cluster_header = 2 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + WeightedCluster weighted_clusters = 3; + } - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
- HedgePolicy hedge_policy = 27; + oneof host_rewrite_specifier { + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + string host_rewrite_literal = 6 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + google.protobuf.BoolValue auto_host_rewrite = 7; + + string host_rewrite_header = 29 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } } // HTTP retry :ref:`architecture overview `. 
@@ -1069,9 +1000,9 @@ message RetryPolicy { string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -1082,9 +1013,9 @@ message RetryPolicy { string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -1214,30 +1145,27 @@ message RedirectAction { PERMANENT_REDIRECT = 4; } - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". - bool https_redirect = 4; + // The scheme portion of the URL will be swapped with "https". + string host_redirect = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } + // The scheme portion of the URL will be swapped with this value. + uint32 port_redirect = 8; // The host portion of the URL will be swapped with this value. - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; // The port value of the URL will be swapped with this value. 
- uint32 port_redirect = 8; + bool strip_query = 6; - oneof path_rewrite_specifier { + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is `http` and the port is explicitly + // set to `:80`, the port will be removed after the redirection + // 2. If the source URI scheme is `https` and the port is explicitly + // set to `:443`, the port will be removed after the redirection + oneof scheme_rewrite_specifier { // The path portion of the URL will be swapped with this value. - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + bool https_redirect = 4; // Indicates that during redirection, the matched prefix (or path) // should be swapped with this value. This option allows redirect URLs be dynamically created @@ -1247,17 +1175,20 @@ message RedirectAction { // // Pay attention to the use of trailing slashes as mentioned in // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + string scheme_redirect = 7; } - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + oneof path_rewrite_specifier { + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + string path_redirect = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. 
+ string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } } message DirectResponseAction { @@ -1342,7 +1273,7 @@ message Tracing { // statistics are perfect in the sense that they are emitted on the downstream // side such that they include network level failures. // -// Documentation for :ref:`virtual cluster statistics `. +// Documentation for :ref:`virtual cluster statistics `. // // .. note:: // @@ -1352,25 +1283,6 @@ message Tracing { message VirtualCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualCluster"; - // Specifies a regex pattern to use for matching requests. The entire path of the request - // must match the regex. The regex grammar used is defined `here - // `_. - // - // Examples: - // - // * The regex ``/rides/\d+`` matches the path */rides/0* - // * The regex ``/rides/\d+`` matches the path */rides/123* - // * The regex ``/rides/\d+`` does not match the path */rides/123/456* - // - // .. attention:: - // This field has been deprecated in favor of `headers` as it is not safe for use with - // untrusted input in all cases. - string hidden_envoy_deprecated_pattern = 1 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - // Specifies a list of header matchers to use for matching requests. Each specified header must // match. The pseudo-headers `:path` and `:method` can be used to match the request path and // method, respectively. @@ -1381,11 +1293,12 @@ message VirtualCluster { // router filter and are documented :ref:`here `. string name = 2 [(validate.rules).string = {min_bytes: 1}]; - // Optionally specifies the HTTP method to match on. For example GET, PUT, - // etc. - // - // .. attention:: - // This field has been deprecated in favor of `headers`. 
+ string hidden_envoy_deprecated_pattern = 1 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + core.v3.RequestMethod hidden_envoy_deprecated_method = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } @@ -1581,35 +1494,15 @@ message HeaderMatcher { string name = 1 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + // If specified, header match will be performed based on the value of the header. + bool invert_match = 8; + // Specifies how the header match will be performed to route the request. oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 4; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. The regex grammar used in the value field is defined - // `here `_. - // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use - // with untrusted input in all cases. - string hidden_envoy_deprecated_regex_match = 5 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. - type.matcher.v3.RegexMatcher safe_regex_match = 11; + string exact_match = 4; // If specified, header match will be performed based on range. 
// The rule will match if the request header value is within this range. @@ -1622,11 +1515,11 @@ message HeaderMatcher { // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" - type.v3.Int64Range range_match = 6; + type.matcher.v3.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on whether the header is in the // request. - bool present_match = 7; + type.v3.Int64Range range_match = 6; // If specified, header match will be performed based on the prefix of the header value. // Note: empty prefix is not allowed, please use present_match instead. @@ -1634,7 +1527,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + bool present_match = 7; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1642,16 +1535,22 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; - } - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. 
- bool invert_match = 8; + string hidden_envoy_deprecated_regex_match = 5 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + } } // Query parameter matching treats the query string of a request's :path header @@ -1665,30 +1564,17 @@ message QueryParameterMatcher { // *path*'s query string. string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; - // Specifies the value of the key. If the value is absent, a request - // that contains the key in its query string will match, whether the - // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") - // - // ..attention:: - // This field is deprecated. Use an `exact` match inside the `string_match` field. + // Specifies whether a query parameter value should match against a string. string hidden_envoy_deprecated_value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Specifies whether the query parameter value is a regular expression. - // Defaults to false. The entire query parameter value (i.e., the part to - // the right of the equals sign in "key=value") must match the regex. - // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. - // - // ..attention:: - // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. + // Specifies whether a query parameter should be present. google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - // Specifies whether a query parameter should be present. 
bool present_match = 6; } } diff --git a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto b/generated_api_shadow/envoy/config/route/v3/scoped_route.proto index fee88e862f47..f2b28ed974c0 100644 --- a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto +++ b/generated_api_shadow/envoy/config/route/v3/scoped_route.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.config.route.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v3"; option java_outer_classname = "ScopedRouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/config/route/v4alpha/BUILD b/generated_api_shadow/envoy/config/route/v4alpha/BUILD new file mode 100644 index 000000000000..507bedd76bdf --- /dev/null +++ b/generated_api_shadow/envoy/config/route/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/tracing/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route.proto b/generated_api_shadow/envoy/config/route/v4alpha/route.proto new file mode 100644 index 000000000000..1b805d35344f --- /dev/null +++ b/generated_api_shadow/envoy/config/route/v4alpha/route.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.config.route.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP route configuration] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// [#next-free-field: 11] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteConfiguration"; + + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.Rds`. + string name = 1; + + // An array of virtual hosts that make up the route table. 
+ repeated VirtualHost virtual_hosts = 2; + + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used + // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // taking precedence. + Vhds vhds = 9; + + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + repeated string internal_only_headers = 3 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or + // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + repeated string response_headers_to_remove = 5 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. 
Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or + // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + repeated string request_headers_to_remove = 8 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // By default, headers that should be added/removed are evaluated from most to least specific: + // + // * route level + // * virtual host level + // * connection manager level + // + // To allow setting overrides at the route or virtual host level, this order can be reversed + // by setting this option to true. Defaults to false. + // + // [#next-major-version: In the v3 API, this will default to true.] + bool most_specific_header_mutations_wins = 10; + + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). 
+ google.protobuf.BoolValue validate_clusters = 7; +} + +message Vhds { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Vhds"; + + // Configuration source specifier for VHDS. + core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto new file mode 100644 index 000000000000..33f8d64543df --- /dev/null +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -0,0 +1,1566 @@ +syntax = "proto3"; + +package envoy.config.route.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v3/regex.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/tracing/v3/custom_tag.proto"; +import "envoy/type/v3/percent.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; +option java_outer_classname = "RouteComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP route components] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. 
Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. +// [#next-free-field: 21] +message VirtualHost { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.VirtualHost"; + + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9, 12; + + reserved "per_filter_config"; + + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. + // + // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. 
+ repeated string domains = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} + }]; + + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + repeated Route routes = 3; + + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; + + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + repeated VirtualCluster virtual_clusters = 5; + + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + repeated RateLimit rate_limits = 6; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. + repeated string request_headers_to_remove = 13; + + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + repeated string response_headers_to_remove = 11; + + // Indicates that the virtual host has a CORS policy. + CorsPolicy cors = 8; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 15; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + // + // [#next-major-version: rename to include_attempt_count_in_request.] + bool include_request_attempt_count = 14; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the downstream response. Setting this option will cause the router to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the downstream + // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. 
+ bool include_attempt_count_in_response = 19; + + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + RetryPolicy retry_policy = 16; + + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that setting a route level entry + // will take precedence over this config and it'll be treated independently (e.g.: values are not + // inherited). :ref:`Retry policy ` should not be + // set if this field is used. + google.protobuf.Any retry_policy_typed_config = 20; + + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + HedgePolicy hedge_policy = 17; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum + // value of this and the listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; +} + +// A filter-defined action type. +message FilterAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterAction"; + + google.protobuf.Any action = 1; +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// [#next-free-field: 18] +message Route { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Route"; + + reserved 6, 8; + + reserved "per_filter_config"; + + // Name for the route. + string name = 14; + + // Route matching parameters. 
+ RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + oneof action { + option (validate.required) = true; + + // Route request to some upstream cluster. + RouteAction route = 2; + + // Return a redirect. + RedirectAction redirect = 3; + + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + + // [#not-implemented-hide:] + // If true, a filter will define the action (e.g., it could dynamically generate the + // RouteAction). + FilterAction filter_action = 17; + } + + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as *envoy.filters.http.router*. + core.v4alpha.Metadata metadata = 4; + + // Decorator for the matched route. + Decorator decorator = 5; + + // The typed_per_filter_config field can be used to provide route-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map typed_per_filter_config = 13; + + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + repeated string request_headers_to_remove = 12; + + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 11; + + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; +} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. 
+message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.WeightedCluster"; + + // [#next-free-field: 11] + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.WeightedCluster.ClusterWeight"; + + reserved 7, 8; + + reserved "per_filter_config"; + + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An integer between 0 and :ref:`total_weight + // `. When a request matches the route, + // the choice of an upstream cluster is determined by its weight. The sum of weights across all + // entries in the clusters array must add up to the total_weight, which defaults to 100. + google.protobuf.UInt32Value weight = 2; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. Note that this will be merged with what's provided in + // :ref:`RouteAction.metadata_match `, with + // values here taking precedence. The filter name should be specified as *envoy.lb*. + core.v4alpha.Metadata metadata_match = 3; + + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + repeated string request_headers_to_remove = 9; + + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and + // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. + repeated string response_headers_to_remove = 6; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 10; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, which must be greater than 0. Defaults to 100. 
+ google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the *runtime_key_prefix* is + // specified, the router will look for weights associated with each upstream + // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where + // *cluster[i]* denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. + string runtime_key_prefix = 2; +} + +// [#next-free-field: 12] +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; + + message GrpcRouteMatchOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions"; + } + + message TlsContextMatchOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.TlsContextMatchOptions"; + + // If specified, the route will match against whether or not a certificate is presented. + // If not specified, certificate presentation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue presented = 1; + + // If specified, the route will match against whether or not a certificate is validated. + // If not specified, certificate validation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue validated = 2; + } + + reserved 5, 3; + + reserved "regex"; + + oneof path_specifier { + option (validate.required) = true; + + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. 
+ string prefix = 1; + + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + } + + // Indicates that prefix/path matching should be case insensitive. The default + // is true. + google.protobuf.BoolValue case_sensitive = 4; + + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. 
Refer to the :ref:`traffic shifting + // ` docs for additional documentation. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9; + + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + repeated HeaderMatcher headers = 6; + + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; + + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + GrpcRouteMatchOptions grpc = 8; + + // If specified, the client tls context will be matched against the defined + // match options. 
+ // + // [#next-major-version: unify with RBAC] + TlsContextMatchOptions tls_context = 11; +} + +// [#next-free-field: 12] +message CorsPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.CorsPolicy"; + + reserved 1, 8, 7; + + reserved "allow_origin", "allow_origin_regex", "enabled"; + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. + repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + + // Specifies the content for the *access-control-allow-methods* header. + string allow_methods = 2; + + // Specifies the content for the *access-control-allow-headers* header. + string allow_headers = 3; + + // Specifies the content for the *access-control-expose-headers* header. + string expose_headers = 4; + + // Specifies the content for the *access-control-max-age* header. + string max_age = 5; + + // Specifies whether the resource allows credentials. + google.protobuf.BoolValue allow_credentials = 6; + + oneof enabled_specifier { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + } + + // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + // enforced. + // + // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those + // fields have to explicitly disable the filter in order for this setting to take effect. 
+ // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* to determine if it's valid but will not enforce any policies. + core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; +} + +// [#next-free-field: 34] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; + + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + // + // .. note:: + // + // Shadowing will not be triggered if the primary cluster does not exist. + message RequestMirrorPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.RequestMirrorPolicy"; + + reserved 2; + + reserved "runtime_key"; + + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. 
+ // + // If specified, this field takes precedence over the `runtime_key` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3; + + // Determines if the trace span should be sampled. Defaults to true. + google.protobuf.BoolValue trace_sampled = 4; + } + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + // [#next-free-field: 7] + message HashPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy"; + + message Header { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.Header"; + + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} + ]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. 
The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.Cookie"; + + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties"; + + // Hash on source IP address. + bool source_ip = 1; + } + + message QueryParameter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter"; + + // The name of the URL query parameter that will be used to obtain the hash + // key. If the parameter is not present, no hash will be produced. Query + // parameter names are case-sensitive. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + message FilterState { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.HashPolicy.FilterState"; + + // The name of the Object in the per-request filterState, which is an + // Envoy::Http::Hashable object. 
If there is no data associated with the key, + // or the stored object is not Envoy::Http::Hashable, no hash will be produced. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + + // Query parameter hash policy. + QueryParameter query_parameter = 5; + + // Filter state hash policy. + FilterState filter_state = 6; + } + + // The flag that short-circuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:`upgrade_configs + // ` + // but does not affect any custom filter chain specified there. + message UpgradeConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.UpgradeConfig"; + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. 
+ string upgrade_type = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Determines if upgrades are available on this route. Defaults to true. + google.protobuf.BoolValue enabled = 2; + } + + reserved 12, 18, 19, 16, 22, 21, 10; + + reserved "request_mirror_policy"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + string cluster_header = 2 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } + + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. 
The filter name should be specified as *envoy.lb*. + core.v4alpha.Metadata metadata_match = 4; + + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of *prefix_rewrite* or + // :ref:`regex_rewrite ` + // may be specified. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to */prefix* will be stripped to */*, while + // requests to */prefix/etc* will be stripped to */etc*. + string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during forwarding, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. The router filter will place the original path as it was + // before the rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`prefix_rewrite ` + // or *regex_rewrite* may be specified. 
+ // + // Examples using Google's `RE2 `_ engine: + // + // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + // into ``/v1/api/instance/foo``. + // + // * The pattern ``one`` paired with a substitution string of ``two`` would + // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + // + // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + // ``\1two\2`` would replace only the first occurrence of ``one``, + // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + // + // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + // ``/aaa/yyy/bbb``. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 6 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; + + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. 
+ string host_rewrite_header = 29 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } + + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration timeout = 8; + + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + google.protobuf.Duration idle_timeout = 24; + + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
+ RetryPolicy retry_policy = 9; + + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that if this is set, it'll take + // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, + // most internal one becomes the enforced policy). :ref:`Retry policy ` + // should not be set if this field is used. + google.protobuf.Any retry_policy_typed_config = 33; + + // Indicates that the route has request mirroring policies. + repeated RequestMirrorPolicy request_mirror_policies = 30; + + // Optionally specifies the :ref:`routing priority `. + core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + + // Specifies a set of rate limit configurations that could be applied to the + // route. + repeated RateLimit rate_limits = 13; + + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + google.protobuf.BoolValue include_vh_rate_limits = 14; + + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). 
If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. + repeated HashPolicy hash_policy = 15; + + // Indicates that the route has a CORS policy. + CorsPolicy cors = 17; + + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. + // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + // + // .. note:: + // + // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + // precedence over `grpc-timeout header `_, when + // both are present. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration max_grpc_timeout = 23; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. 
This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28; + + repeated UpgradeConfig upgrade_configs = 25; + + InternalRedirectAction internal_redirect_action = 26; + + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 31; + + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; +} + +// HTTP retry :ref:`architecture overview `. 
+// [#next-free-field: 11] +message RetryPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy"; + + message RetryPriority { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RetryPolicy.RetryPriority"; + + reserved 2; + + reserved "config"; + + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RetryPolicy.RetryHostPredicate"; + + reserved 2; + + reserved "config"; + + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RetryPolicy.RetryBackOff"; + + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. 
+ string retry_on = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + google.protobuf.UInt32Value num_retries = 2; + + // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The + // same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + google.protobuf.Duration per_try_timeout = 3; + + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority retry_priority = 4; + + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + repeated RetryHostPredicate retry_host_predicate = 5; + + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + int64 host_selection_retry_max_attempts = 6; + + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + repeated uint32 retriable_status_codes = 7; + + // Specifies parameters that control retry back off. 
This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff retry_back_off = 8; + + // HTTP response headers that trigger a retry if present in the response. A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + repeated HeaderMatcher retriable_headers = 9; + + // HTTP headers which must be present in the request for retries to be attempted. + repeated HeaderMatcher retriable_request_headers = 10; +} + +// HTTP request hedging :ref:`architecture overview `. +message HedgePolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.HedgePolicy"; + + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + // [#not-implemented-hide:] + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + // [#not-implemented-hide:] + type.v3.FractionalPercent additional_request_chance = 2; + + // Indicates that a hedged request should be sent when the per-try timeout + // is hit. This will only occur if the retry policy also indicates that a + // timed out request should be retried. + // Once a timed out request is retried due to per try timeout, the router + // filter will ensure that it is not retried again even if the returned + // response headers would otherwise be retried according the specified + // :ref:`RetryPolicy `. + // Defaults to false. 
+ bool hedge_on_per_try_timeout = 3; +} + +// [#next-free-field: 9] +message RedirectAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RedirectAction"; + + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is `http` and the port is explicitly + // set to `:80`, the port will be removed after the redirection + // 2. If the source URI scheme is `https` and the port is explicitly + // set to `:443`, the port will be removed after the redirection + oneof scheme_rewrite_specifier { + // The scheme portion of the URL will be swapped with "https". + bool https_redirect = 4; + + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + + // The host portion of the URL will be swapped with this value. + string host_redirect = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + string path_redirect = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + // + // .. attention:: + // + // Pay attention to the use of trailing slashes as mentioned in + // :ref:`RouteAction's prefix_rewrite `. 
+ string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. + bool strip_query = 6; +} + +message DirectResponseAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.DirectResponseAction"; + + // Specifies the HTTP response status to be returned. + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; + + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. note:: + // + // Headers can be specified using *response_headers_to_add* in the enclosing + // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` or + // :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`. + core.v4alpha.DataSource body = 2; +} + +message Decorator { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Decorator"; + + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Whether the decorated details should be propagated to the other party. The default is true. 
+ google.protobuf.BoolValue propagate = 2; +} + +message Tracing { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Tracing"; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + type.v3.FractionalPercent client_sampling = 1; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.FractionalPercent random_sampling = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.FractionalPercent overall_sampling = 3; + + // A list of custom tags with unique tag name to create tags for the active span. + // It will take effect after merging with the :ref:`corresponding configuration + // ` + // configured in the HTTP connection manager. If two tags with the same name are configured + // each in the HTTP connection manager and the route level, the one configured here takes + // priority. 
+ repeated type.tracing.v3.CustomTag custom_tags = 4; +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect†statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. +message VirtualCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.VirtualCluster"; + + reserved 1, 3; + + reserved "pattern", "method"; + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers `:path` and `:method` can be used to match the request path and + // method, respectively. + repeated HeaderMatcher headers = 4; + + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + string name = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Global rate limiting :ref:`architecture overview `. 
+message RateLimit { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; + + // [#next-free-field: 7] + message Action { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action"; + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.SourceCluster"; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.DestinationCluster"; + } + + // The following descriptor entry is appended when a header contains a key that matches the + // *header_name*: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.RequestHeaders"; + + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. 
+ string header_name = 1 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} + ]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.RemoteAddress"; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.GenericKey"; + + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch"; + + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. 
A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + } + } + + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* +// header. Thus, if attempting to match on *Host*, match on *:authority* instead. +// +// .. 
attention:: +// +// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "exact_match": "POST" +// } +// +// .. attention:: +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +// [#next-free-field: 12] +message HeaderMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.HeaderMatcher"; + + reserved 2, 3, 5; + + reserved "regex_match"; + + // Specifies the name of the header in the request. + string name = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Specifies how the header match will be performed to route the request. + oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + type.matcher.v3.RegexMatcher safe_regex_match = 11; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. 
+ // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, + // "-1somestring" + type.v3.Int64Range range_match = 6; + + // If specified, header match will be performed based on whether the header is in the + // request. + bool present_match = 7; + + // If specified, header match will be performed based on the prefix of the header value. + // Note: empty prefix is not allowed, please use present_match instead. + // + // Examples: + // + // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, header match will be performed based on the suffix of the header value. + // Note: empty suffix is not allowed, please use present_match instead. + // + // Examples: + // + // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. + string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + } + + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. + bool invert_match = 8; +} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. +// [#next-free-field: 7] +message QueryParameterMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.QueryParameterMatcher"; + + reserved 3, 4; + + reserved "value", "regex"; + + // Specifies the name of a key that must be present in the requested + // *path*'s query string. + string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + + oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. 
+ type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + + // Specifies whether a query parameter should be present. + bool present_match = 6; + } +} diff --git a/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto b/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto new file mode 100644 index 000000000000..ce3d285b0592 --- /dev/null +++ b/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.config.route.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; +option java_outer_classname = "ScopedRouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP scoped routing configuration] +// * Routing :ref:`architecture overview ` + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` (identified by its resource name). +// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... 
+// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: , +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the `route-config1` +// RouteConfiguration being assigned to the HTTP request/stream. +// +message ScopedRouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ScopedRouteConfiguration"; + + // Specifies a key which is matched against the output of the + // :ref:`scope_key_builder` + // specified in the HttpConnectionManager. The matching is done per HTTP + // request and is dependent on the order of the fragments contained in the + // Key. + message Key { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ScopedRouteConfiguration.Key"; + + message Fragment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment"; + + oneof type { + option (validate.required) = true; + + // A string to match against. + string string_key = 1; + } + } + + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. 
+ repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated + // with this scope. + string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The key to match against. + Key key = 3 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index d01ce006faea..0fea8f88a638 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -8,13 +8,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common tap configuration] diff --git a/generated_api_shadow/envoy/config/trace/v2/BUILD b/generated_api_shadow/envoy/config/trace/v2/BUILD index 15069690c2e8..ca496808bdae 100644 --- a/generated_api_shadow/envoy/config/trace/v2/BUILD +++ b/generated_api_shadow/envoy/config/trace/v2/BUILD @@ -8,6 +8,7 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) diff --git a/generated_api_shadow/envoy/config/trace/v2/trace.proto b/generated_api_shadow/envoy/config/trace/v2/trace.proto index 
420e4aa28ff0..393465d2bb24 100644 --- a/generated_api_shadow/envoy/config/trace/v2/trace.proto +++ b/generated_api_shadow/envoy/config/trace/v2/trace.proto @@ -11,11 +11,13 @@ import "google/protobuf/wrappers.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "TraceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tracing] // Tracing :ref:`architecture overview `. @@ -61,12 +63,31 @@ message Tracing { // Configuration for the LightStep tracer. // [#extension: envoy.tracers.lightstep] message LightstepConfig { + // Available propagation modes + enum PropagationMode { + // Propagate trace context in the single header x-ot-span-context. + ENVOY = 0; + + // Propagate trace context using LightStep's native format. + LIGHTSTEP = 1; + + // Propagate trace context using the b3 format. + B3 = 2; + + // Propagation trace context using the w3 trace-context standard. + TRACE_CONTEXT = 3; + } + // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Propagation modes to use by LightStep's tracer. + repeated PropagationMode propagation_modes = 3 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } // Configuration for the Zipkin tracer. 
diff --git a/generated_api_shadow/envoy/config/trace/v2alpha/BUILD b/generated_api_shadow/envoy/config/trace/v2alpha/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/generated_api_shadow/envoy/config/trace/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/trace/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto b/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto index d415846dfef1..27db3ba40b72 100644 --- a/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto +++ b/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto @@ -5,11 +5,13 @@ package envoy.config.trace.v2alpha; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2alpha"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer @@ -19,8 +21,8 @@ message XRayConfig { // If this value is not set, the default value of 127.0.0.1:2000 will be used. api.v2.core.SocketAddress daemon_endpoint = 1; - // The name of the X-Ray segment. By default this will be set to the cluster name. - string segment_name = 2; + // The name of the X-Ray segment. + string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. 
// For an example of the sampling rules see: diff --git a/generated_api_shadow/envoy/config/trace/v3/trace.proto b/generated_api_shadow/envoy/config/trace/v3/trace.proto index 82b34fe7d210..174ab5dceb01 100644 --- a/generated_api_shadow/envoy/config/trace/v3/trace.proto +++ b/generated_api_shadow/envoy/config/trace/v3/trace.proto @@ -9,14 +9,16 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; -import "udpa/annotations/versioning.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "TraceProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tracing] // Tracing :ref:`architecture overview `. @@ -54,9 +56,9 @@ message Tracing { // - :ref:`OpenCensusConfig ` // - :ref:`AWS X-Ray ` oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } @@ -70,12 +72,31 @@ message LightstepConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.LightstepConfig"; + // Available propagation modes + enum PropagationMode { + // Propagate trace context in the single header x-ot-span-context. + ENVOY = 0; + + // Propagate trace context using LightStep's native format. + LIGHTSTEP = 1; + + // Propagate trace context using the b3 format. + B3 = 2; + + // Propagation trace context using the w3 trace-context standard. + TRACE_CONTEXT = 3; + } + // The cluster manager cluster that hosts the LightStep collectors. 
string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Propagation modes to use by LightStep's tracer. + repeated PropagationMode propagation_modes = 3 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } // Configuration for the Zipkin tracer. diff --git a/generated_api_shadow/envoy/config/trace/v3/xray.proto b/generated_api_shadow/envoy/config/trace/v3/xray.proto index 08baf444c38a..c4259177d657 100644 --- a/generated_api_shadow/envoy/config/trace/v3/xray.proto +++ b/generated_api_shadow/envoy/config/trace/v3/xray.proto @@ -5,13 +5,14 @@ package envoy.config.trace.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer @@ -24,8 +25,8 @@ message XRayConfig { // If this value is not set, the default value of 127.0.0.1:2000 will be used. core.v3.SocketAddress daemon_endpoint = 1; - // The name of the X-Ray segment. By default this will be set to the cluster name. - string segment_name = 2; + // The name of the X-Ray segment. + string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. 
// For an example of the sampling rules see: diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/BUILD b/generated_api_shadow/envoy/config/trace/v4alpha/BUILD new file mode 100644 index 000000000000..53ae98aac140 --- /dev/null +++ b/generated_api_shadow/envoy/config/trace/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/trace/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", + ], +) diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/trace.proto b/generated_api_shadow/envoy/config/trace/v4alpha/trace.proto new file mode 100644 index 000000000000..ae8ecc846597 --- /dev/null +++ b/generated_api_shadow/envoy/config/trace/v4alpha/trace.proto @@ -0,0 +1,271 @@ +syntax = "proto3"; + +package envoy.config.trace.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "opencensus/proto/trace/v1/trace_config.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; +option java_outer_classname = "TraceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + +// The tracing configuration specifies global +// settings for the HTTP tracer used by Envoy. The configuration is defined by +// the :ref:`Bootstrap ` :ref:`tracing +// ` field. 
Envoy may support other tracers +// in the future, but right now the HTTP tracer is the only one supported. +message Tracing { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.Tracing"; + + message Http { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.Tracing.Http"; + + reserved 2; + + reserved "config"; + + // The name of the HTTP trace driver to instantiate. The name must match a + // supported HTTP trace driver. Built-in trace drivers: + // + // - *envoy.tracers.lightstep* + // - *envoy.tracers.zipkin* + // - *envoy.tracers.dynamic_ot* + // - *envoy.tracers.datadog* + // - *envoy.tracers.opencensus* + // - *envoy.tracers.xray* + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Trace driver specific configuration which depends on the driver being instantiated. + // See the trace drivers for examples: + // + // - :ref:`LightstepConfig ` + // - :ref:`ZipkinConfig ` + // - :ref:`DynamicOtConfig ` + // - :ref:`DatadogConfig ` + // - :ref:`OpenCensusConfig ` + // - :ref:`AWS X-Ray ` + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Provides configuration for the HTTP tracer. + Http http = 1; +} + +// Configuration for the LightStep tracer. +// [#extension: envoy.tracers.lightstep] +message LightstepConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.LightstepConfig"; + + // Available propagation modes + enum PropagationMode { + // Propagate trace context in the single header x-ot-span-context. + ENVOY = 0; + + // Propagate trace context using LightStep's native format. + LIGHTSTEP = 1; + + // Propagate trace context using the b3 format. + B3 = 2; + + // Propagation trace context using the w3 trace-context standard. + TRACE_CONTEXT = 3; + } + + // The cluster manager cluster that hosts the LightStep collectors. 
+ string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // File containing the access token to the `LightStep + // `_ API. + string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Propagation modes to use by LightStep's tracer. + repeated PropagationMode propagation_modes = 3 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; +} + +// Configuration for the Zipkin tracer. +// [#extension: envoy.tracers.zipkin] +// [#next-free-field: 6] +message ZipkinConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ZipkinConfig"; + + // Available Zipkin collector endpoint versions. + enum CollectorEndpointVersion { + // Zipkin API v1, JSON over HTTP. + // [#comment: The default implementation of Zipkin client before this field is added was only v1 + // and the way user configure this was by not explicitly specifying the version. Consequently, + // before this is added, the corresponding Zipkin collector expected to receive v1 payload. + // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when + // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, + // since in Zipkin realm this v1 version is considered to be not preferable anymore.] + hidden_envoy_deprecated_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 + [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; + + // Zipkin API v2, JSON over HTTP. + HTTP_JSON = 1; + + // Zipkin API v2, protobuf over HTTP. + HTTP_PROTO = 2; + + // [#not-implemented-hide:] + GRPC = 3; + } + + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The API endpoint of the Zipkin service where the spans will be sent. 
When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. + string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + bool trace_id_128bit = 3; + + // Determines whether client and server spans will share the same span context. + // The default value is true. + google.protobuf.BoolValue shared_span_context = 4; + + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be + // used. + CollectorEndpointVersion collector_endpoint_version = 5; +} + +// DynamicOtConfig is used to dynamically load a tracer from a shared library +// that implements the `OpenTracing dynamic loading API +// `_. +// [#extension: envoy.tracers.dynamic_ot] +message DynamicOtConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.DynamicOtConfig"; + + // Dynamic library implementing the `OpenTracing API + // `_. + string library = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The configuration to use when creating a tracer from the given dynamic + // library. + google.protobuf.Struct config = 2; +} + +// Configuration for the Datadog tracer. +// [#extension: envoy.tracers.datadog] +message DatadogConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.DatadogConfig"; + + // The cluster to use for submitting traces to the Datadog agent. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The name used for the service when traces are generated by envoy. + string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Configuration for the OpenCensus tracer. 
+// [#next-free-field: 15] +// [#extension: envoy.tracers.opencensus] +message OpenCensusConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.OpenCensusConfig"; + + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + + // Configures tracing, e.g. the sampler, max number of annotations, etc. + opencensus.proto.trace.v1.TraceConfig trace_config = 1; + + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + bool stdout_exporter_enabled = 2; + + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + bool stackdriver_exporter_enabled = 3; + + // The Cloud project_id to use for Stackdriver tracing. + string stackdriver_project_id = 4; + + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + string stackdriver_address = 10; + + // (optional) The gRPC server that hosts Stackdriver tracing service. Only + // Google gRPC is supported. If :ref:`target_uri ` + // is not provided, the default production Stackdriver address will be used. + core.v4alpha.GrpcService stackdriver_grpc_service = 13; + + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. + bool zipkin_exporter_enabled = 5; + + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" + string zipkin_url = 6; + + // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or + // ocagent_grpc_service must also be set. 
+ bool ocagent_exporter_enabled = 11; + + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + // [#comment:TODO: deprecate this field] + string ocagent_address = 12; + + // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. + // This is only used if the ocagent_address is left empty. + core.v4alpha.GrpcService ocagent_grpc_service = 14; + + // List of incoming trace context headers we will accept. First one found + // wins. + repeated TraceContext incoming_trace_context = 8; + + // List of outgoing trace context headers we will produce. + repeated TraceContext outgoing_trace_context = 9; +} + +// Configuration structure. +message TraceServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v3.TraceServiceConfig"; + + // The upstream gRPC cluster that hosts the metrics service. + core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/xray.proto b/generated_api_shadow/envoy/config/trace/v4alpha/xray.proto new file mode 100644 index 000000000000..39bcebd1bad7 --- /dev/null +++ b/generated_api_shadow/envoy/config/trace/v4alpha/xray.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.config.trace.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; +option java_outer_classname = "XrayProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: AWS X-Ray Tracer Configuration] +// Configuration for AWS X-Ray tracer + +message XRayConfig { + option 
(udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.XRayConfig"; + + // The UDP endpoint of the X-Ray Daemon where the spans will be sent. + // If this value is not set, the default value of 127.0.0.1:2000 will be used. + core.v4alpha.SocketAddress daemon_endpoint = 1; + + // The name of the X-Ray segment. + string segment_name = 2 [(validate.rules).string = {min_len: 1}]; + + // The location of a local custom sampling rules JSON file. + // For an example of the sampling rules see: + // `X-Ray SDK documentation + // `_ + core.v4alpha.DataSource sampling_rule_manifest = 3; +} diff --git a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto b/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto index 0ef4d12cca9e..92d5fb83a49c 100644 --- a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto +++ b/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.transport_socket.alts.v2alpha; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha"; @@ -10,6 +11,7 @@ option java_outer_classname = "AltsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.alts.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ALTS] // [#extension: envoy.transport_sockets.alts] diff --git a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto b/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto index 027bd9a5a4b8..1b3fd395d572 100644 --- a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto +++ b/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto 
@@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.config.transport_socket.raw_buffer.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2"; option java_outer_classname = "RawBufferProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.raw_buffer.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Raw Buffer] // [#extension: envoy.transport_sockets.raw_buffer] diff --git a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto index 53add0a9f79e..0802c7558ad3 100644 --- a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ b/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto @@ -5,9 +5,8 @@ package envoy.config.transport_socket.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/config/common/tap/v2alpha/common.proto"; -import "udpa/annotations/status.proto"; - import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; @@ -16,6 +15,7 @@ option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tap.v3"; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] diff --git a/generated_api_shadow/envoy/config/wasm/v2/wasm.proto b/generated_api_shadow/envoy/config/wasm/v2/wasm.proto index dc62325286d7..21960a44f30d 100644 --- a/generated_api_shadow/envoy/config/wasm/v2/wasm.proto +++ 
b/generated_api_shadow/envoy/config/wasm/v2/wasm.proto @@ -5,12 +5,12 @@ package envoy.config.wasm.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; -import "validate/validate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.wasm.v2"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.wasm.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Wasm service] diff --git a/api/envoy/extensions/wasm/v3/BUILD b/generated_api_shadow/envoy/config/wasm/v3/BUILD similarity index 88% rename from api/envoy/extensions/wasm/v3/BUILD rename to generated_api_shadow/envoy/config/wasm/v3/BUILD index 8182a50ccaea..2c3dad6453b6 100644 --- a/api/envoy/extensions/wasm/v3/BUILD +++ b/generated_api_shadow/envoy/config/wasm/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/wasm/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/config/wasm/v3/wasm.proto similarity index 81% rename from api/envoy/extensions/wasm/v3/wasm.proto rename to generated_api_shadow/envoy/config/wasm/v3/wasm.proto index e334072dc5aa..56dfa6fc7e2a 100644 --- a/api/envoy/extensions/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/config/wasm/v3/wasm.proto @@ -1,24 +1,22 @@ syntax = "proto3"; -package envoy.extensions.wasm.v3; +package envoy.config.wasm.v3; import "envoy/config/core/v3/base.proto"; -import "udpa/annotations/versioning.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.wasm.v3"; +option java_package = "io.envoyproxy.envoy.config.wasm.v3"; option java_outer_classname = 
"WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm service] // Configuration for a Wasm VM. // [#next-free-field: 6] message VmConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2.VmConfig"; - // An ID which will be used along with a hash of the wasm code (or null_vm_id) to determine which // VM will be used for the plugin. All plugins which use the same vm_id and code will use the same // VM. May be left blank. @@ -28,7 +26,7 @@ message VmConfig { string runtime = 2; // The Wasm code that Envoy will execute. - config.core.v3.AsyncDataSource code = 3; + core.v3.AsyncDataSource code = 3; // The Wasm configuration string used on initialization of a new VM (proxy_onStart). string configuration = 4; @@ -39,8 +37,6 @@ message VmConfig { // Base Configuration for Wasm Plugins, e.g. filters and services. message PluginConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2.PluginConfig"; - // A unique name for a filters/services in a VM for use in identifiying the filter/service if // multiple filters/services are handled by the same vm_id and root_id and for logging/debugging. string name = 1; @@ -62,8 +58,6 @@ message PluginConfig { // `. This opaque configuration will be used to // create a Wasm Service. message WasmService { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2.WasmService"; - // General plugin configuration. 
PluginConfig config = 1; diff --git a/generated_api_shadow/envoy/data/accesslog/v2/BUILD b/generated_api_shadow/envoy/data/accesslog/v2/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/generated_api_shadow/envoy/data/accesslog/v2/BUILD +++ b/generated_api_shadow/envoy/data/accesslog/v2/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto index 1c4e5ee13c73..af19197f62a6 100644 --- a/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto @@ -10,11 +10,13 @@ import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.accesslog.v2"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index fb81c6360667..374569d937f2 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -10,13 +10,14 @@ import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import 
"validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.accesslog.v3"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed diff --git a/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto index cec29bccc73f..3ea8bc2597fd 100644 --- a/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto +++ b/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto @@ -6,12 +6,14 @@ import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha"; option java_outer_classname = "OutlierDetectionEventProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.data.cluster.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. 
diff --git a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto index 6e9d59b0e15d..ae1ad4c94d17 100644 --- a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto +++ b/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto @@ -5,13 +5,14 @@ package envoy.data.cluster.v3; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.cluster.v3"; option java_outer_classname = "OutlierDetectionEventProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. diff --git a/generated_api_shadow/envoy/data/core/v2alpha/BUILD b/generated_api_shadow/envoy/data/core/v2alpha/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/generated_api_shadow/envoy/data/core/v2alpha/BUILD +++ b/generated_api_shadow/envoy/data/core/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto b/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto index 777cb9c270df..00fd69fd42d3 100644 --- a/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto +++ b/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto @@ -6,11 +6,13 @@ import "envoy/api/v2/core/address.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; 
option java_package = "io.envoyproxy.envoy.data.core.v2alpha"; option java_outer_classname = "HealthCheckEventProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. diff --git a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto index 7ad278876dcf..cff0e381bd19 100644 --- a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto +++ b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto @@ -6,13 +6,14 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.core.v3"; option java_outer_classname = "HealthCheckEventProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. @@ -41,27 +42,27 @@ message HealthCheckEvent { string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; + // Host ejection. + google.protobuf.Timestamp timestamp = 6; + oneof event { option (validate.required) = true; - // Host ejection. + // Host addition. HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - // Host addition. + // Host failure. HealthCheckAddHealthy add_healthy_event = 5; - // Host failure. + // Healthy host became degraded. HealthCheckFailure health_check_failure_event = 7; - // Healthy host became degraded. + // A degraded host returned to being healthy. DegradedHealthyHost degraded_healthy_host = 8; - // A degraded host returned to being healthy. + // Timestamp for event. NoLongerDegradedHost no_longer_degraded_host = 9; } - - // Timestamp for event. 
- google.protobuf.Timestamp timestamp = 6; } message HealthCheckEjectUnhealthy { diff --git a/generated_api_shadow/envoy/config/filter/network/wasm/v2/BUILD b/generated_api_shadow/envoy/data/dns/v2alpha/BUILD similarity index 87% rename from generated_api_shadow/envoy/config/filter/network/wasm/v2/BUILD rename to generated_api_shadow/envoy/data/dns/v2alpha/BUILD index 7903b3becced..702abad68ac1 100644 --- a/generated_api_shadow/envoy/config/filter/network/wasm/v2/BUILD +++ b/generated_api_shadow/envoy/data/dns/v2alpha/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/wasm/v2:pkg", + "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto new file mode 100644 index 000000000000..7a9e535c4f3a --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto @@ -0,0 +1,74 @@ +syntax = "proto3"; + +package envoy.data.dns.v2alpha; + +import "envoy/type/matcher/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v2alpha"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. + +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + // This field contains a well formed IP address that is returned + // in the answer for a name query. The address field can be an + // IPv4 or IPv6 address. 
Address family detection is done automatically + // when Envoy parses the string. Since this field is repeated, + // Envoy will return one randomly chosen entry from this list in the + // DNS response. The random index will vary per query so that we prevent + // clients pinning on a single address for a configured domain + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses + // or dictate some other method for resolving the addresses for an + // endpoint + message DnsEndpoint { + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + } + } + + message DnsVirtualDomain { + // The domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address + // of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in dns answers from Envoy returned to the client + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + } + + // Control how many times envoy makes an attempt to forward a query to + // an external server + uint32 external_retry_count = 1; + + // Fully qualified domain names for which Envoy will respond to queries + repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // This field serves to help Envoy determine whether it can authoritatively + // answer a query for a name matching a suffix in this list. 
If the query + // name does not match a suffix in this list, Envoy will forward + // the query to an upstream DNS server + repeated type.matcher.StringMatcher known_suffixes = 3; +} diff --git a/generated_api_shadow/envoy/data/dns/v3/BUILD b/generated_api_shadow/envoy/data/dns/v3/BUILD new file mode 100644 index 000000000000..d61d877fef2e --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v3/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/data/dns/v2alpha:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto new file mode 100644 index 000000000000..a6457e118672 --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package envoy.data.dns.v3; + +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v3"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. 
+ +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable"; + + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v2alpha.DnsTable.AddressList"; + + // This field contains a well formed IP address that is returned + // in the answer for a name query. The address field can be an + // IPv4 or IPv6 address. Address family detection is done automatically + // when Envoy parses the string. Since this field is repeated, + // Envoy will return one randomly chosen entry from this list in the + // DNS response. The random index will vary per query so that we prevent + // clients pinning on a single address for a configured domain + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses + // or dictate some other method for resolving the addresses for an + // endpoint + message DnsEndpoint { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; + + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + } + } + + message DnsVirtualDomain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; + + // The domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address + // of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in dns answers from Envoy returned to the client + google.protobuf.Duration answer_ttl = 3 
[(validate.rules).duration = {gt {}}]; + } + + // Control how many times envoy makes an attempt to forward a query to + // an external server + uint32 external_retry_count = 1; + + // Fully qualified domain names for which Envoy will respond to queries + repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // This field serves to help Envoy determine whether it can authoritatively + // answer a query for a name matching a suffix in this list. If the query + // name does not match a suffix in this list, Envoy will forward + // the query to an upstream DNS server + repeated type.matcher.v3.StringMatcher known_suffixes = 3; +} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/BUILD b/generated_api_shadow/envoy/data/tap/v2alpha/BUILD index 97eb16ccddad..69168ad0cf24 100644 --- a/generated_api_shadow/envoy/data/tap/v2alpha/BUILD +++ b/generated_api_shadow/envoy/data/tap/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/api/v2/core:pkg"], + deps = [ + "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/common.proto b/generated_api_shadow/envoy/data/tap/v2alpha/common.proto index 93e33a2ea8d2..7c02aa771954 100644 --- a/generated_api_shadow/envoy/data/tap/v2alpha/common.proto +++ b/generated_api_shadow/envoy/data/tap/v2alpha/common.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.data.tap.v2alpha; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap common data] diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/http.proto b/generated_api_shadow/envoy/data/tap/v2alpha/http.proto index 
bde7006e1093..60ea68b66d4a 100644 --- a/generated_api_shadow/envoy/data/tap/v2alpha/http.proto +++ b/generated_api_shadow/envoy/data/tap/v2alpha/http.proto @@ -5,9 +5,12 @@ package envoy.data.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/data/tap/v2alpha/common.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP tap data] diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto b/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto index dfc7f0780611..82c2845ee338 100644 --- a/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto +++ b/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto @@ -7,9 +7,12 @@ import "envoy/data/tap/v2alpha/common.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "TransportProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Transport tap data] // Trace format for the tap transport socket extension. 
This dumps plain text read/write diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto b/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto index 13d39b8d3585..769b95c6160a 100644 --- a/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto +++ b/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto @@ -5,11 +5,13 @@ package envoy.data.tap.v2alpha; import "envoy/data/tap/v2alpha/http.proto"; import "envoy/data/tap/v2alpha/transport.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap data wrappers] diff --git a/generated_api_shadow/envoy/data/tap/v3/common.proto b/generated_api_shadow/envoy/data/tap/v3/common.proto index 85c1c39c5ee2..c954b1b6747d 100644 --- a/generated_api_shadow/envoy/data/tap/v3/common.proto +++ b/generated_api_shadow/envoy/data/tap/v3/common.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.data.tap.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap common data] @@ -15,21 +17,21 @@ option java_multiple_files = true; message Body { option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body"; - oneof body_type { - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bytes as_bytes = 1; + // Body data as bytes. By default, tap body data will be present in this field, as the proto + // `bytes` type can contain any valid byte. 
+ bool truncated = 3; + oneof body_type { // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING // ` sink // format type is selected. See the documentation for that option for why this is useful. + bytes as_bytes = 1; + + // Specifies whether body data has been truncated to fit within the specified + // :ref:`max_buffered_rx_bytes + // ` and + // :ref:`max_buffered_tx_bytes + // ` settings. string as_string = 2; } - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. - bool truncated = 3; } diff --git a/generated_api_shadow/envoy/data/tap/v3/http.proto b/generated_api_shadow/envoy/data/tap/v3/http.proto index a84bd9b10195..d4f05fa09522 100644 --- a/generated_api_shadow/envoy/data/tap/v3/http.proto +++ b/generated_api_shadow/envoy/data/tap/v3/http.proto @@ -5,11 +5,13 @@ package envoy.data.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP tap data] diff --git a/generated_api_shadow/envoy/data/tap/v3/transport.proto b/generated_api_shadow/envoy/data/tap/v3/transport.proto index ee5125edfe87..f596759cb490 100644 --- a/generated_api_shadow/envoy/data/tap/v3/transport.proto +++ b/generated_api_shadow/envoy/data/tap/v3/transport.proto @@ -7,11 +7,13 @@ import "envoy/data/tap/v3/common.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "TransportProto"; option java_multiple_files = true; +option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Transport tap data] // Trace format for the tap transport socket extension. This dumps plain text read/write diff --git a/generated_api_shadow/envoy/data/tap/v3/wrapper.proto b/generated_api_shadow/envoy/data/tap/v3/wrapper.proto index 3320833aab62..636547614c26 100644 --- a/generated_api_shadow/envoy/data/tap/v3/wrapper.proto +++ b/generated_api_shadow/envoy/data/tap/v3/wrapper.proto @@ -5,13 +5,14 @@ package envoy.data.tap.v3; import "envoy/data/tap/v3/http.proto"; import "envoy/data/tap/v3/transport.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.tap.v3"; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap data wrappers] diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto index 1bcf1afd9422..f3c9c0a11612 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto @@ -4,13 +4,14 @@ package envoy.extensions.access_loggers.file.v3; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v3"; option java_outer_classname = "FileProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: File access log] // [#extension: envoy.access_loggers.file] diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto 
b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto index 7e059bb55cdc..3cc154416627 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -7,13 +7,14 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.access_loggers.grpc.v3"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD index c25dbab0011c..9c848eaafb17 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v2:pkg", - "//envoy/extensions/wasm/v3:pkg", + "//envoy/config/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto index 19430e582090..2513a17a7fe6 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -2,17 +2,15 @@ syntax = "proto3"; package envoy.extensions.access_loggers.wasm.v3; -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "google/protobuf/struct.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "udpa/annotations/status.proto"; 
import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm access log] @@ -23,5 +21,5 @@ message WasmAccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.WasmAccessLog"; - envoy.extensions.wasm.v3.PluginConfig config = 1; + config.wasm.v3.PluginConfig config = 1; } diff --git a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto index 0d00e7b444ba..aead1c451739 100644 --- a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.clusters.aggregate.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.aggregate.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Aggregate cluster configuration] diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index bbd1c833a7fd..6f100d9dbb7e 100644 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -4,13 +4,14 @@ package envoy.extensions.clusters.dynamic_forward_proxy.v3; import 
"envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy cluster configuration] diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto index 7975b2eb659f..cf01359e55ab 100644 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -5,13 +5,14 @@ package envoy.extensions.clusters.redis.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.redis.v3"; option java_outer_classname = "RedisClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Redis Cluster Configuration] // This cluster adds support for `Redis Cluster `_, as part diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 7ec4a4fcd817..7c72af35af33 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -7,13 +7,14 @@ import "envoy/config/cluster/v3/cluster.proto"; import "google/protobuf/duration.proto"; 
import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3"; option java_outer_classname = "DnsCacheProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy common configuration] @@ -48,7 +49,12 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; + // + // .. note: + // + // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. + google.protobuf.Duration dns_refresh_rate = 3 + [(validate.rules).duration = {gte {nanos: 1000000}}]; // The TTL for hosts that are unused. Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. 
diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 1410e24b12a4..187ae3f229c4 100644 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.common.ratelimit.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.common.ratelimit.v3"; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common rate limit components] diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto index 6e951c5e475d..46a25b164d67 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto @@ -5,13 +5,14 @@ package envoy.extensions.common.tap.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.common.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common tap extension configuration] diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD new file mode 100644 index 000000000000..d1fe49142a8e --- /dev/null +++ 
b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "//envoy/extensions/common/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto new file mode 100644 index 000000000000..63de14a3d6f6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.extensions.common.tap.v4alpha; + +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/tap/v3/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.common.tap.v4alpha"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common tap extension configuration] + +// Common configuration for all tap extensions. +message CommonExtensionConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.common.tap.v3.CommonExtensionConfig"; + + // [#not-implemented-hide:] + message TapDSConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig"; + + // Configuration for the source of TapDS updates for this Cluster. + config.core.v4alpha.ConfigSource config_source = 1 + [(validate.rules).message = {required: true}]; + + // Tap config to request from XDS server. 
+ string name = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + oneof config_type { + option (validate.required) = true; + + // If specified, the tap filter will be configured via an admin handler. + AdminConfig admin_config = 1; + + // If specified, the tap filter will be configured via a static configuration that cannot be + // changed. + config.tap.v3.TapConfig static_config = 2; + + // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. + TapDSConfig tapds_config = 3; + } +} + +// Configuration for the admin handler. See :ref:`here ` for +// more information. +message AdminConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.common.tap.v3.AdminConfig"; + + // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is + // matched to the configured filter opaque ID to determine which filter to configure. + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; +} diff --git a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD new file mode 100644 index 000000000000..d011b4d830ad --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", + "//envoy/data/dns/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto new file mode 100644 index 000000000000..38a8872d323e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.extensions.filter.udp.dns_filter.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/data/dns/v3/dns_table.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filter.udp.dns_filter.v3alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig"; + + // This message contains the configuration for the Dns Filter operating + // in a server context. 
This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig.ServerContextConfig"; + + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v3.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v3.DataSource external_dns_table = 2; + } + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration + ServerContextConfig server_config = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto index 17230ebfacaa..a5a688468fb4 100644 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto @@ -6,14 +6,15 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.common.fault.v3"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common fault injection types] @@ -39,27 +40,26 @@ message FaultDelay { reserved 2; - // Unused and deprecated. Will be removed in the next release. 
+ // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified + // delay will be injected before a new request/operation. For TCP + // connections, the proxying of the connection upstream will be delayed + // for the specified period. This is required if type is FIXED. + type.v3.FractionalPercent percentage = 4; + + // Fault delays are controlled via an HTTP header (if applicable). FaultDelayType hidden_envoy_deprecated_type = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; oneof fault_delay_secifier { option (validate.required) = true; - // Add a fixed delay before forwarding the operation upstream. See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. This is required if type is FIXED. + // The percentage of operations/connections/requests on which the delay will be injected. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; - // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; } - - // The percentage of operations/connections/requests on which the delay will be injected. - type.v3.FractionalPercent percentage = 4; } // Describes a rate limit to be applied. @@ -84,16 +84,16 @@ message FaultRateLimit { "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit"; } + // A fixed rate limit. + type.v3.FractionalPercent percentage = 2; + oneof limit_type { option (validate.required) = true; - // A fixed rate limit. + // Rate limits are controlled via an HTTP header (if applicable). 
FixedLimit fixed_limit = 1; - // Rate limits are controlled via an HTTP header (if applicable). + // The percentage of operations/connections/requests on which the rate limit will be injected. HeaderLimit header_limit = 3; } - - // The percentage of operations/connections/requests on which the rate limit will be injected. - type.v3.FractionalPercent percentage = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index e806c9761138..7ff9bb6a0f5f 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -9,13 +9,14 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3"; option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview @@ -92,15 +93,15 @@ message AdaptiveConcurrency { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency"; + // Gradient concurrency control will be used. + config.core.v3.RuntimeFeatureFlag enabled = 2; + oneof concurrency_controller_config { option (validate.required) = true; - // Gradient concurrency control will be used. + // If set to false, the adaptive concurrency filter will operate as a pass-through filter. 
If the + // message is unspecified, the filter will be enabled. GradientControllerConfig gradient_controller_config = 1 [(validate.rules).message = {required: true}]; } - - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto index f41639d9b5b1..b4b9cc398f2e 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.aws_lambda.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3"; option java_outer_classname = "AwsLambdaProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS Lambda] // AWS Lambda :ref:`configuration overview `. @@ -19,6 +20,17 @@ message Config { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.aws_lambda.v2alpha.Config"; + enum InvocationMode { + // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In + // this mode the output of the Lambda function becomes the response of the HTTP request. + SYNCHRONOUS = 0; + + // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be + // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the + // call which is translated to an HTTP 200 OK by the filter. 
+ ASYNCHRONOUS = 1; + } + // The ARN of the AWS Lambda to invoke when the filter is engaged // Must be in the following format: // arn::lambda:::function: @@ -26,6 +38,9 @@ message Config { // Whether to transform the request (headers and body) to a JSON payload or pass it as is. bool payload_passthrough = 2; + + // Determines the way to invoke the Lambda function. + InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; } // Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto index e46ef3170262..b80bc1b82108 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.aws_request_signing.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3"; option java_outer_classname = "AwsRequestSigningProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AwsRequestSigning] // AwsRequestSigning :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto b/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto index 59ffa83ac3f2..6f73244032c4 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.buffer.v3; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.buffer.v3"; option java_outer_classname = "BufferProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Buffer] // Buffer :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto index 26016442bb0a..1ff305bb0e27 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -9,13 +9,13 @@ import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha"; option java_outer_classname = "CacheProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Cache Filter] // [#extension: envoy.filters.http.cache] diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto 
b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto index 7d506a2152af..0eefe55140d2 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -6,11 +6,13 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; option java_outer_classname = "CompressorProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Compressor] diff --git a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto b/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto index fd41e76449ae..0269e1bdfd8c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.cors.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.cors.v3"; option java_outer_classname = "CorsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cors] // CORS Filter :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto b/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto index 7748abf88d36..263d705e3f54 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto @@ -5,13 +5,14 @@ package envoy.extensions.filters.http.csrf.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/matcher/v3/string.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v3"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto index 35e8a608d473..b8a2525dbf54 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.dynamic_forward_proxy.v3; import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3"; option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy] 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto index baf977e00334..13a4f1c6ceee 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.dynamo.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamo.v3"; option java_outer_classname = "DynamoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamo] // Dynamo :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index a13f7073d81b..64e82c7b1614 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -8,14 +8,15 @@ import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. 
@@ -26,14 +27,11 @@ message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; - // External authorization service configuration. - oneof services { - // gRPC service configuration (default timeout: 200ms). - config.core.v3.GrpcService grpc_service = 1; + // gRPC service configuration (default timeout: 200ms). + bool failure_mode_allow = 2; - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } + // HTTP service configuration (default timeout: 200ms). + BufferSettings with_request_body = 5; // Changes filter's behaviour on errors: // @@ -47,19 +45,12 @@ message ExtAuthz { // // Note that errors can be *always* tracked in the :ref:`stats // `. - bool failure_mode_allow = 2; - - // Sets the package version the gRPC service should use. This is particularly - // useful when transitioning from alpha to release versions assuming that both definitions are - // semantically compatible. Deprecation note: This field is deprecated and should only be used for - // version upgrade. See release notes for more details. - bool hidden_envoy_deprecated_use_alpha = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + bool clear_route_cache = 6; // Enables filter to buffer the client request body and send it within the authorization request. // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; + type.v3.HttpStatus status_on_error = 7; // Clears route cache in order to allow the external authorization service to correctly affect // routing decisions. Filter clears all cached routes when: @@ -71,11 +62,11 @@ message ExtAuthz { // 3. At least one *authorization response header* is added to the client request, or is used for // altering another client request header. 
// - bool clear_route_cache = 6; + repeated string metadata_context_namespaces = 8; // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. - type.v3.HttpStatus status_on_error = 7; + config.core.v3.RuntimeFractionalPercent filter_enabled = 9; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. @@ -89,7 +80,7 @@ message ExtAuthz { // metadata_context_namespaces: // - envoy.filters.http.jwt_authn // - repeated string metadata_context_namespaces = 8; + bool include_peer_certificate = 10; // Specifies if the filter is enabled. // @@ -97,13 +88,19 @@ message ExtAuthz { // Envoy will lookup the runtime key to get the percentage of requests to filter. // // If this field is not specified, the filter will be enabled for all requests. - config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + bool hidden_envoy_deprecated_use_alpha = 4 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 10; + // External authorization service configuration. + oneof services { + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + config.core.v3.GrpcService grpc_service = 1; + + HttpService http_service = 3; + } } // Configuration for buffering the request data. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto index 6127ca848460..07996a9507ff 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto @@ -8,13 +8,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v3"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. @@ -34,19 +35,19 @@ message FaultAbort { reserved 1; + // HTTP status code to use to abort the HTTP request. + type.v3.FractionalPercent percentage = 3; + oneof error_type { option (validate.required) = true; - // HTTP status code to use to abort the HTTP request. + // Fault aborts are controlled via an HTTP header (if applicable). uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - // Fault aborts are controlled via an HTTP header (if applicable). + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. HeaderAbort header_abort = 4; } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. 
- type.v3.FractionalPercent percentage = 3; } // [#next-free-field: 14] diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto index 3835db1c0584..7e31da49e92b 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_http1_bridge.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC HTTP/1.1 Bridge] // gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto index 88a09075d6f4..85d7cbe1cecd 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] // gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto index 5da637cefeb2..da27441f2aca 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_json_transcoder.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3"; option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder 
:ref:`configuration overview `. @@ -44,36 +45,32 @@ message GrpcJsonTranscoder { bool preserve_proto_field_names = 4; } - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + PrintOptions print_options = 3; // A list of strings that // supplies the fully qualified service names (i.e. "package_name.service_name") that // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than // the service names specified here, but they won't be translated. - repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; + bool match_incoming_request_route = 5; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `_. - PrintOptions print_options = 3; + repeated string ignored_query_parameters = 6; // Whether to keep the incoming request route after the outgoing headers have been transformed to // the match the upstream gRPC service. Note: This means that routes for gRPC services that are // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool match_incoming_request_route = 5; + bool auto_mapping = 7; // A list of query parameters to be ignored for transcoding method mapping. 
// By default, the transcoder filter will not transcode a request if there are any @@ -100,7 +97,7 @@ message GrpcJsonTranscoder { // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow // the same request to be mapped to ``GetShelf``. - repeated string ignored_query_parameters = 6; + bool ignore_unknown_query_parameters = 8; // Whether to route methods without the ``google.api.http`` option. // @@ -122,41 +119,45 @@ message GrpcJsonTranscoder { // // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool auto_mapping = 7; + bool convert_grpc_status = 9; - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. - bool ignore_unknown_query_parameters = 8; + oneof descriptor_set { + option (validate.required) = true; - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. - // - // For example, if an upstream server replies with headers: - // - // .. code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. 
code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bool convert_grpc_status = 9; + // Whether to ignore query parameters that cannot be mapped to a corresponding + // protobuf field. Use this if you cannot control the query parameters and do + // not know them beforehand. Otherwise use ``ignored_query_parameters``. + // Defaults to false. + string proto_descriptor = 1; + + // Whether to convert gRPC status headers to JSON. + // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` + // from the ``grpc-status-details-bin`` header and use it as JSON body. + // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and + // ``grpc-message`` headers. + // The error details types must be present in the ``proto_descriptor``. + // + // For example, if an upstream server replies with headers: + // + // .. code-block:: none + // + // grpc-status: 5 + // grpc-status-details-bin: + // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ + // + // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message + // ``google.rpc.Status``. It will be transcoded into: + // + // .. code-block:: none + // + // HTTP/1.1 404 Not Found + // content-type: application/json + // + // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} + // + // In order to transcode the message, the ``google.rpc.RequestInfo`` type from + // the ``google/rpc/error_details.proto`` should be included in the configured + // :ref:`proto descriptor set `. 
+ bytes proto_descriptor_bin = 4; + } } diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD index 6416ce6b09a5..cfae56e4cac3 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto index bc3574562b32..1fecdaea0a16 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto @@ -2,13 +2,18 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_stats.v3; -import "udpa/annotations/versioning.proto"; +import "envoy/config/core/v3/grpc_method_list.proto"; + +import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. @@ -22,6 +27,33 @@ message FilterConfig { // If true, the filter maintains a filter state object with the request and response message // counts. bool emit_filter_state = 1; + + oneof per_method_stat_specifier { + // If set, specifies an allowlist of service/methods that will have individual stats + // emitted for them. 
Any call that does not match the allowlist will be counted + // in a stat with no method specifier: `cluster..grpc.*`. + config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; + + // If set to true, emit stats for all service/method names. + // + // If set to false, emit stats for all service/message types to the same stats without including + // the service/method in the name, with prefix `cluster..grpc`. This can be useful if + // service/method granularity is not needed, or if each cluster only receives a single method. + // + // .. attention:: + // This option is only safe if all clients are trusted. If this option is enabled + // with untrusted clients, the clients could cause unbounded growth in the number of stats in + // Envoy, using unbounded memory and potentially slowing down stats pipelines. + // + // .. attention:: + // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the + // behavior will default to `stats_for_all_methods=true`. This default value is deprecated, + // and in a future release, if neither field is set, it will default to + // `stats_for_all_methods=false` in order to be safe by default. This behavior can be + // controlled with runtime override + // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. + google.protobuf.BoolValue stats_for_all_methods = 3; + } } // gRPC statistics filter state object in protobuf form. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto index 2c881a78d56e..8161139f547b 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.http.grpc_web.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3"; option java_outer_classname = "GrpcWebProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Web] // gRPC Web :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto index f0726d4ca5f2..3206037723de 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -6,13 +6,14 @@ import "envoy/extensions/filters/http/compressor/v3/compressor.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v3"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Gzip] // Gzip :ref:`configuration overview `. @@ -45,12 +46,6 @@ message Gzip { // use more memory, but are faster and produce better compression results. The default value is 5. 
google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 2 [deprecated = true]; - // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. "BEST" provides higher compression at the cost of // higher latency, "SPEED" provides lower compression with minimum impact on response time. @@ -66,29 +61,6 @@ message Gzip { // refer to zlib manual. CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - repeated string hidden_envoy_deprecated_content_type = 6 [deprecated = true]; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - bool hidden_envoy_deprecated_disable_on_etag_header = 7 [deprecated = true]; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. 
- bool hidden_envoy_deprecated_remove_accept_encoding_header = 8 [deprecated = true]; - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. For more details about this parameter, please refer to @@ -99,4 +71,12 @@ message Gzip { // the fields `content_length`, `content_type`, `disable_on_etag_header` and // `remove_accept_encoding_header` are ignored. compressor.v3.Compressor compressor = 10; + + google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 2 [deprecated = true]; + + repeated string hidden_envoy_deprecated_content_type = 6 [deprecated = true]; + + bool hidden_envoy_deprecated_disable_on_etag_header = 7 [deprecated = true]; + + bool hidden_envoy_deprecated_remove_accept_encoding_header = 8 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 249c61298087..8e7c490f01b6 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3"; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Header-To-Metadata Filter] // @@ -77,7 +78,8 @@ message Config { 
"envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; // The header that triggers this rule — required. - string header = 1 [(validate.rules).string = {min_bytes: 1}]; + string header = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If the header is present, apply this metadata KeyValuePair. // diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto index 87aac0de0232..1a5dbf1bb900 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -8,13 +8,14 @@ import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v3"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto index 9f77f7ba763b..a23ad9dea0a9 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.ip_tagging.v3; import "envoy/config/core/v3/address.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3"; option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 577e857c0e49..802a582a572a 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -9,13 +9,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. 
@@ -77,50 +78,44 @@ message JwtProvider { // repeated string audiences = 2; - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + bool forward = 5; - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - config.core.v3.DataSource local_jwks = 4; - } + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + repeated JwtHeader from_headers = 6; // If false, the JWT is removed in the request after a success verification. If true, the JWT is // not removed in the request. 
Default value is false. - bool forward = 5; + repeated string from_params = 7; // Two fields below define where to extract the JWT from an HTTP request. // @@ -147,7 +142,7 @@ message JwtProvider { // // ``x-goog-iap-jwt-assertion: ``. // - repeated JwtHeader from_headers = 6; + string forward_payload_header = 8; // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. // @@ -162,33 +157,39 @@ message JwtProvider { // // /path?jwt_token= // - repeated string from_params = 7; + string payload_in_metadata = 9; - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - string forward_payload_header = 8; + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. 
+ RemoteJwks remote_jwks = 3; + + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + config.core.v3.DataSource local_jwks = 4; + } } // This message specifies how to fetch JWKS from remote and how to cache it. diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto index a05081402d75..da6b0c09a0f6 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.lua.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.lua.v3"; option java_outer_classname = "LuaProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Lua] // Lua :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto b/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto index 81c169489c53..5c6b96540c19 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.on_demand.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.on_demand.v3"; option java_outer_classname = "OnDemandProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OnDemand] // IP tagging :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto b/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto index 33d122f2d6a9..507c9728fbbf 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.original_src.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.original_src.v3"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index fda94812818e..057b7c3d4403 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -6,13 +6,14 @@ import "envoy/config/ratelimit/v3/rls.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto index 6d5ec317e970..bae67ec5a0a9 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.rbac.v3; import "envoy/config/rbac/v3/rbac.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD new file mode 100644 index 000000000000..bd16c3f2a0d6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/rbac/v4alpha:pkg", + "//envoy/extensions/filters/http/rbac/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto new file mode 100644 index 000000000000..ec65f5d7bcb6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.rbac.v4alpha; + +import "envoy/config/rbac/v4alpha/rbac.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha"; +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RBAC] +// Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.http.rbac] + +// RBAC filter config. +message RBAC { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.rbac.v3.RBAC"; + + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. 
+ config.rbac.v4alpha.RBAC rules = 1; + + // Shadow rules are not enforced by the filter (i.e., returning a 403) + // but will emit stats and logs and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + config.rbac.v4alpha.RBAC shadow_rules = 2; +} + +message RBACPerRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.rbac.v3.RBACPerRoute"; + + reserved 1; + + // Override the global configuration of the filter with this new config. + // If absent, the global RBAC policy will be disabled for this route. + RBAC rbac = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto index f26d86630623..6ab64f92f2b0 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto @@ -6,13 +6,14 @@ import "envoy/config/accesslog/v3/accesslog.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v3"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Router :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto b/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto index 9222b786b0c1..0ea335a414fa 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto @@ -5,13 +5,14 @@ package envoy.extensions.filters.http.squash.v3; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.squash.v3"; option java_outer_classname = "SquashProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Squash] // Squash :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto b/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto index 1f92e910b862..81779443e4a5 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.http.tap.v3; import "envoy/extensions/common/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD new file mode 100644 index 000000000000..5204b739b76c --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/common/tap/v4alpha:pkg", + "//envoy/extensions/filters/http/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto new file mode 100644 index 000000000000..98798be8bfd2 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.tap.v4alpha; + +import "envoy/extensions/common/tap/v4alpha/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap] +// Tap :ref:`configuration overview `. +// [#extension: envoy.filters.http.tap] + +// Top level configuration for the tap filter. +message Tap { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.tap.v3.Tap"; + + // Common configuration for the HTTP tap filter. 
+ common.tap.v4alpha.CommonExtensionConfig common_config = 1 + [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD index 6d6a05070f89..31c49afb4c5c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD @@ -6,8 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/http/wasm/v2:pkg", - "//envoy/extensions/wasm/v3:pkg", + "//envoy/config/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto index ff23cb3c31a6..a8e583c921e2 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -2,23 +2,21 @@ syntax = "proto3"; package envoy.extensions.filters.http.wasm.v3; -import "envoy/extensions/wasm/v3/wasm.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] // Wasm :ref:`configuration overview `. message Wasm { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.wasm.v2.Wasm"; - // General Plugin configuration. 
- envoy.extensions.wasm.v3.PluginConfig config = 1; + config.wasm.v3.PluginConfig config = 1; } diff --git a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto b/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto index 869eef571235..cb439b0973ba 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.http_inspector.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3"; option java_outer_classname = "HttpInspectorProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Inspector Filter] // Detect whether the application protocol is HTTP. 
diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto b/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto index 962306b0a4fc..8239c5c42c52 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.original_dst.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3"; option java_outer_classname = "OriginalDstProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Dst Filter] // Use the Original destination address on downstream connections. diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto b/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto index f35bd821582d..5fd07924d7fd 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.listener.original_src.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_src.v3"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. 
diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 67eff4341ef7..63ad72945e28 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.proxy_protocol.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Proxy Protocol Filter] // PROXY protocol listener filter. diff --git a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto b/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto index bf6fa7224b63..eff9774844f4 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.listener.tls_inspector.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3"; option java_outer_classname = "TlsInspectorProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TLS Inspector Filter] // Allows detecting whether the transport appears to be TLS or plaintext. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index 175793d68b7a..e2da157574f8 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -6,13 +6,14 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3"; option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client TLS authentication] // Client TLS authentication diff --git a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto index fba1384f3c43..2742372b2f91 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto @@ -4,11 +4,13 @@ package envoy.extensions.filters.network.direct_response.v3; import "envoy/config/core/v3/base.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.direct_response.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Direct response] // Direct response :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto index 7ba81d8596f8..fa1959a425c8 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.network.dubbo_proxy.router.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Dubbo router :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto index 089d79868158..749708880d71 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto @@ -6,13 +6,14 @@ import "envoy/extensions/filters/network/dubbo_proxy/v3/route.proto"; import "google/protobuf/any.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto index 63e3c13a2987..f06518c0b672 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto @@ -6,13 +6,14 @@ import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/range.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto b/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto index edbc79a1ab25..077d87259b6b 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.network.echo.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.echo.v3"; option java_outer_classname = "EchoProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Echo] // Echo :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index 82e5b8145db6..c3a63ac0a4f6 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.ext_authz.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 2e15dbb28c79..6d1044caa76b 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -16,20 +16,21 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3"; option java_outer_classname = 
"HttpConnectionManagerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 36] +// [#next-free-field: 37] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -105,27 +106,6 @@ message HttpConnectionManager { EGRESS = 1; } - // The span name will be derived from this field. If - // :ref:`traffic_direction ` is - // specified on the parent listener, then it is used instead of this field. - // - // .. attention:: - // This field has been deprecated in favor of `traffic_direction`. - OperationName hidden_envoy_deprecated_operation_name = 1 [ - deprecated = true, - (validate.rules).enum = {defined_only: true}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // A list of header names used to create tags for the active span. The header name is used to - // populate the tag name, and the header value is used to populate the tag value. The tag is - // created if the specified header name is present in the request's headers. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`custom_tags - // `. - repeated string hidden_envoy_deprecated_request_headers_for_tags = 2 [deprecated = true]; - // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable @@ -168,6 +148,14 @@ message HttpConnectionManager { // from the bootstrap config. 
// [#not-implemented-hide:] config.trace.v3.Tracing.Http provider = 9; + + OperationName hidden_envoy_deprecated_operation_name = 1 [ + deprecated = true, + (validate.rules).enum = {defined_only: true}, + (envoy.annotations.disallowed_by_default) = true + ]; + + repeated string hidden_envoy_deprecated_request_headers_for_tags = 2 [deprecated = true]; } message InternalAddressConfig { @@ -255,74 +243,59 @@ message HttpConnectionManager { // more information. string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; + // The connection manager’s route table will be dynamically loaded via the RDS API. + repeated HttpFilter http_filters = 5; - // The route table for the connection manager is static and is specified in this property. - config.route.v3.RouteConfiguration route_config = 4; + // The route table for the connection manager is static and is specified in this property. + google.protobuf.BoolValue add_user_agent = 6; - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + Tracing tracing = 7; // A list of individual HTTP filters that make up the filter chain for // requests made to the connection manager. Order matters as the filters are // processed sequentially as request events happen. 
- repeated HttpFilter http_filters = 5; + config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; + config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. - Tracing tracing = 7; + config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; + string server_name = 10; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - config.core.v3.Http1ProtocolOptions http_protocol_options = 8; + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum = {defined_only: true}]; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; + google.protobuf.UInt32Value max_request_headers_kb = 29 + [(validate.rules).uint32 = {lte: 96 gt: 0}]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. - string server_name = 10; + google.protobuf.Duration stream_idle_timeout = 24; // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. 
- ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; + google.protobuf.Duration request_timeout = 28; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. // Requests that exceed this limit will receive a 431 response. // The max configurable limit is 96 KiB, based on current implementation // constraints. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 96 gt: 0}]; - - // The idle timeout for connections managed by the connection manager. The - // idle timeout is defined as the period in which there are no active - // requests. If not set, there is no idle timeout. When the idle timeout is - // reached the connection will be closed. If the connection is an HTTP/2 - // connection a drain sequence will occur prior to closing the connection. - // This field is deprecated. Use :ref:`idle_timeout - // ` - // instead. - google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + google.protobuf.Duration drain_timeout = 12; // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. The default value was selected @@ -349,13 +322,13 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration delayed_close_timeout = 26; - // A timeout for idle requests managed by the connection manager. + // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. 
all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; + repeated config.accesslog.v3.AccessLog access_log = 13; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification†(GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -366,7 +339,7 @@ message HttpConnectionManager { // both when a connection hits the idle timeout or during general server // draining. The default grace period is 5000 milliseconds (5 seconds) if this // option is not specified. - google.protobuf.Duration drain_timeout = 12; + google.protobuf.BoolValue use_remote_address = 14; // The delayed close timeout is for downstream connections managed by the HTTP connection manager. // It is defined as a grace period after connection close processing has been locally initiated @@ -398,11 +371,11 @@ message HttpConnectionManager { // A value of 0 will completely disable delayed close processing. When disabled, the downstream // connection's socket will be closed immediately after the write flush is completed or will // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; + uint32 xff_num_trusted_hops = 19; // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. - repeated config.accesslog.v3.AccessLog access_log = 13; + InternalAddressConfig internal_address_config = 25; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -411,20 +384,20 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- google.protobuf.BoolValue use_remote_address = 14; + bool skip_xff_append = 21; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when // determining the origin client's IP address. The default is zero if this option // is not specified. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - uint32 xff_num_trusted_hops = 19; + string via = 22; // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information about internal/external addresses. - InternalAddressConfig internal_address_config = 25; + google.protobuf.BoolValue generate_request_id = 15; // If set, Envoy will not append the remote address to the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in @@ -434,28 +407,28 @@ message HttpConnectionManager { // will also suppress XFF addition, it has consequences for logging and other // Envoy uses of the remote address, so *skip_xff_append* should be used // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; + bool preserve_external_request_id = 32; // Via header value to append to request and response headers. If this is // empty, no via header will be appended. - string via = 22; + ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum = {defined_only: true}]; // Whether the connection manager will generate the :ref:`x-request-id // ` header if it does not exist. This defaults to // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature // is not desired it can be disabled. 
- google.protobuf.BoolValue generate_request_id = 15; + SetCurrentClientCertDetails set_current_client_cert_details = 17; // Whether the connection manager will keep the :ref:`x-request-id // ` header if passed for a request that is edge // (Edge request is the request from external clients to front Envoy) and not reset it, which // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; + bool proxy_100_continue = 18; // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; + bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; // This field is valid only when :ref:`forward_client_cert_details // ` @@ -464,13 +437,13 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and // *By* is always set when the client certificate presents the URI type Subject Alternative Name // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; + repeated UpgradeConfig upgrade_configs = 23; // If proxy_100_continue is true, Envoy will proxy incoming "Expect: // 100-continue" headers upstream, and forward "100 Continue" responses // downstream. If this is false or not set, Envoy will instead strip the // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; + google.protobuf.BoolValue normalize_path = 30; // If // :ref:`use_remote_address @@ -485,9 +458,9 @@ message HttpConnectionManager { // ` for runtime // control. 
// [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; + bool merge_slashes = 33; - repeated UpgradeConfig upgrade_configs = 23; + RequestIDExtension request_id_extension = 36; // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? This affects the upstream *:path* header @@ -500,14 +473,33 @@ message HttpConnectionManager { // for details of normalization. // Note that Envoy does not perform // `case normalization ` - google.protobuf.BoolValue normalize_path = 30; + google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. - bool merge_slashes = 33; + oneof route_specifier { + option (validate.required) = true; + + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without + // setting this option, incoming requests with path `//dir///file` will not match against route + // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // `HTTP spec ` and is provided for convenience. + Rds rds = 3; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. 
Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + config.route.v3.RouteConfiguration route_config = 4; + + ScopedRoutes scoped_routes = 31; + } } message Rds { @@ -682,8 +674,16 @@ message HttpFilter { // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 4; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } + +message RequestIDExtension { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension"; + + // Request ID extension specific configuration. + google.protobuf.Any typed_config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD new file mode 100644 index 000000000000..792ccf7ab677 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -0,0 +1,19 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/config/trace/v4alpha:pkg", + "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", + "//envoy/type/tracing/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto new file mode 100644 index 000000000000..226dc2727fc5 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -0,0 +1,685 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.http_connection_manager.v4alpha; + +import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/route/v4alpha/route.proto"; +import "envoy/config/route/v4alpha/scoped_route.proto"; +import "envoy/config/trace/v4alpha/trace.proto"; +import "envoy/type/tracing/v3/custom_tag.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha"; +option java_outer_classname = "HttpConnectionManagerProto"; +option java_multiple_files = 
true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP connection manager] +// HTTP connection manager :ref:`configuration overview `. +// [#extension: envoy.filters.network.http_connection_manager] + +// [#next-free-field: 37] +message HttpConnectionManager { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; + + enum CodecType { + // For every new connection, the connection manager will determine which + // codec to use. This mode supports both ALPN for TLS listeners as well as + // protocol inference for plaintext listeners. If ALPN data is available, it + // is preferred, otherwise protocol inference is used. In almost all cases, + // this is the right option to choose for this setting. + AUTO = 0; + + // The connection manager will assume that the client is speaking HTTP/1.1. + HTTP1 = 1; + + // The connection manager will assume that the client is speaking HTTP/2 + // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + // Prior knowledge is allowed). + HTTP2 = 2; + + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + HTTP3 = 3; + } + + enum ServerHeaderTransformation { + // Overwrite any Server header with the contents of server_name. + OVERWRITE = 0; + + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; + + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. 
+ enum ForwardClientCertDetails { + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; + + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; + + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; + + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; + + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + ALWAYS_FORWARD_ONLY = 4; + } + + // [#next-free-field: 10] + message Tracing { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing"; + + enum OperationName { + // The HTTP listener is used for ingress/incoming requests. + INGRESS = 0; + + // The HTTP listener is used for egress/outgoing requests. + EGRESS = 1; + } + + reserved 1, 2; + + reserved "operation_name", "request_headers_for_tags"; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + type.v3.Percent client_sampling = 3; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. 
+ // Default: 100% + type.v3.Percent random_sampling = 4; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.Percent overall_sampling = 5; + + // Whether to annotate spans with additional data. If true, spans will include logs for stream + // events. + bool verbose = 6; + + // Maximum length of the request path to extract and include in the HttpUrl tag. Used to + // truncate lengthy request paths to meet the needs of a tracing backend. + // Default: 256 + google.protobuf.UInt32Value max_path_tag_length = 7; + + // A list of custom tags with unique tag name to create tags for the active span. + repeated type.tracing.v3.CustomTag custom_tags = 8; + + // Configuration for an external tracing provider. + // If not specified, Envoy will fall back to using tracing provider configuration + // from the bootstrap config. + // [#not-implemented-hide:] + config.trace.v4alpha.Tracing.Http provider = 9; + } + + message InternalAddressConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." + "InternalAddressConfig"; + + // Whether unix socket addresses should be considered internal. 
+ bool unix_sockets = 1; + } + + // [#next-free-field: 7] + message SetCurrentClientCertDetails { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." + "SetCurrentClientCertDetails"; + + reserved 2; + + // Whether to forward the subject of the client cert. Defaults to false. + google.protobuf.BoolValue subject = 1; + + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + bool chain = 6; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + } + + // The configuration for HTTP upgrades. + // For each upgrade type desired, an UpgradeConfig must be added. + // + // .. warning:: + // + // The current implementation of upgrade headers does not handle + // multi-valued upgrade headers. Support for multi-valued headers may be + // added in the future if needed. + // + // .. warning:: + // The current implementation of upgrade headers does not work with HTTP/2 + // upstreams. + message UpgradeConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." + "UpgradeConfig"; + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. 
+ string upgrade_type = 1; + + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + repeated HttpFilter filters = 2; + + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + google.protobuf.BoolValue enabled = 3; + } + + reserved 27, 11; + + reserved "idle_timeout"; + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + config.route.v4alpha.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } + + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. 
See the linked + // documentation for more information. Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + + // Presence of the object defines whether the connection manager + // emits :ref:`tracing ` data to the :ref:`configured tracing provider + // `. + Tracing tracing = 7; + + // Additional settings for HTTP requests handled by the connection manager. These will be + // applicable to both HTTP1 and HTTP2 requests. + config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35; + + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. + config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; + + // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9; + + // An optional override that the connection manager will write to the server + // header in responses. If not set, the default is *envoy*. + string server_name = 10; + + // Defines the action to be applied to the Server header on the response path. + // By default, Envoy will overwrite the header with the value specified in + // server_name. + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum = {defined_only: true}]; + + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + // The max configurable limit is 96 KiB, based on current implementation + // constraints. + google.protobuf.UInt32Value max_request_headers_kb = 29 + [(validate.rules).uint32 = {lte: 96 gt: 0}]; + + // The stream idle timeout for connections managed by the connection manager. + // If not specified, this defaults to 5 minutes. 
The default value was selected + // so as not to interfere with any smaller configured timeouts that may have + // existed in configurations prior to the introduction of this feature, while + // introducing robustness to TCP connections that terminate without a FIN. + // + // This idle timeout applies to new streams and is overridable by the + // :ref:`route-level idle_timeout + // `. Even on a stream in + // which the override applies, prior to receipt of the initial request + // headers, the :ref:`stream_idle_timeout + // ` + // applies. Each time an encode/decode event for headers or data is processed + // for the stream, the timer will be reset. If the timeout fires, the stream + // is terminated with a 408 Request Timeout error code if no upstream response + // header has been received, otherwise a stream reset occurs. + // + // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + // to the granularity of events presented to the connection manager. For example, while receiving + // very large request headers, it may be the case that there is traffic regularly arriving on the + // wire while the connection manage is only able to observe the end-of-headers event, hence the + // stream may still idle timeout. + // + // A value of 0 will completely disable the connection manager stream idle + // timeout, although per-route idle timeout overrides will continue to apply. + google.protobuf.Duration stream_idle_timeout = 24; + + // The amount of time that Envoy will wait for the entire request to be received. + // The timer is activated when the request is initiated, and is disarmed when the last byte of the + // request is sent upstream (i.e. all decoding filters have processed the request), OR when the + // response is initiated. If not specified or set to 0, this timeout is disabled. 
+ google.protobuf.Duration request_timeout = 28; + + // The time that Envoy will wait between sending an HTTP/2 “shutdown + // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + // This is used so that Envoy provides a grace period for new streams that + // race with the final GOAWAY frame. During this grace period, Envoy will + // continue to accept new streams. After the grace period, a final GOAWAY + // frame is sent and Envoy will start refusing new streams. Draining occurs + // both when a connection hits the idle timeout or during general server + // draining. The default grace period is 5000 milliseconds (5 seconds) if this + // option is not specified. + google.protobuf.Duration drain_timeout = 12; + + // The delayed close timeout is for downstream connections managed by the HTTP connection manager. + // It is defined as a grace period after connection close processing has been locally initiated + // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. + // + // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + // sequence mitigates a race condition that exists when downstream clients do not drain/process + // data in a connection's receive buffer after a remote close has been detected via a socket + // write(). This race leads to such clients failing to process the response code sent by Envoy, + // which could result in erroneous downstream processing. 
+ // + // If the timeout triggers, Envoy will close the connection's socket. + // + // The default timeout is 1000 ms if this option is not specified. + // + // .. NOTE:: + // To be useful in avoiding the race condition described above, this timeout must be set + // to *at least* +<100ms to account for + // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + // A value of 0 will completely disable delayed close processing. When disabled, the downstream + // connection's socket will be closed immediately after the write flush is completed or will + // never close if the write flush does not complete. + google.protobuf.Duration delayed_close_timeout = 26; + + // Configuration for :ref:`HTTP access logs ` + // emitted by the connection manager. + repeated config.accesslog.v3.AccessLog access_log = 13; + + // If set to true, the connection manager will use the real remote address + // of the client connection when determining internal versus external origin and manipulating + // various headers. If set to false or absent, the connection manager will use the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for`, + // :ref:`config_http_conn_man_headers_x-envoy-internal`, and + // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + google.protobuf.BoolValue use_remote_address = 14; + + // The number of additional ingress proxy hops from the right side of the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + // determining the origin client's IP address. The default is zero if this option + // is not specified. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. 
+ uint32 xff_num_trusted_hops = 19; + + // Configures what network addresses are considered internal for stats and header sanitation + // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. + // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information about internal/external addresses. + InternalAddressConfig internal_address_config = 25; + + // If set, Envoy will not append the remote address to the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + // has mutated the request headers. While :ref:`use_remote_address + // ` + // will also suppress XFF addition, it has consequences for logging and other + // Envoy uses of the remote address, so *skip_xff_append* should be used + // when only an elision of XFF addition is intended. + bool skip_xff_append = 21; + + // Via header value to append to request and response headers. If this is + // empty, no via header will be appended. + string via = 22; + + // Whether the connection manager will generate the :ref:`x-request-id + // ` header if it does not exist. This defaults to + // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature + // is not desired it can be disabled. + google.protobuf.BoolValue generate_request_id = 15; + + // Whether the connection manager will keep the :ref:`x-request-id + // ` header if passed for a request that is edge + // (Edge request is the request from external clients to front Envoy) and not reset it, which + // is the current Envoy behaviour. This defaults to false. + bool preserve_external_request_id = 32; + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. 
+ ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum = {defined_only: true}]; + + // This field is valid only when :ref:`forward_client_cert_details + // ` + // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + // the client certificate to be forwarded. Note that in the + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and + // *By* is always set when the client certificate presents the URI type Subject Alternative Name + // value. + SetCurrentClientCertDetails set_current_client_cert_details = 17; + + // If proxy_100_continue is true, Envoy will proxy incoming "Expect: + // 100-continue" headers upstream, and forward "100 Continue" responses + // downstream. If this is false or not set, Envoy will instead strip the + // "Expect: 100-continue" header, and send a "100 Continue" response itself. + bool proxy_100_continue = 18; + + // If + // :ref:`use_remote_address + // ` + // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. + // This is useful for testing compatibility of upstream services that parse the header value. For + // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + // `_ for details. This will also affect the + // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + // ` for runtime + // control. + // [#not-implemented-hide:] + bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; + + repeated UpgradeConfig upgrade_configs = 23; + + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream *:path* header + // as well. 
For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison ` + // for details of normalization. + // Note that Envoy does not perform + // `case normalization ` + google.protobuf.BoolValue normalize_path = 30; + + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without + // setting this option, incoming requests with path `//dir///file` will not match against route + // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // `HTTP spec ` and is provided for convenience. + bool merge_slashes = 33; + + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + RequestIDExtension request_id_extension = 36; +} + +message Rds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.Rds"; + + // Configuration source specifier for RDS. + config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; + + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. 
+ string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// This message is used to work around the limitations with 'oneof' and repeated fields. +message ScopedRouteConfigurationsList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList"; + + repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated = {min_items: 1}]; +} + +// [#next-free-field: 6] +message ScopedRoutes { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes"; + + // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These + // keys are matched against a set of :ref:`Key` + // objects assembled from :ref:`ScopedRouteConfiguration` + // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via + // :ref:`scoped_route_configurations_list`. + // + // Upon receiving a request's headers, the Router will build a key using the algorithm specified + // by this message. This key will be used to look up the routing table (i.e., the + // :ref:`RouteConfiguration`) to use for the request. + message ScopeKeyBuilder { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder"; + + // Specifies the mechanism for constructing key fragments which are composed into scope keys. + message FragmentBuilder { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." + "ScopeKeyBuilder.FragmentBuilder"; + + // Specifies how the value of a header should be extracted. + // The following example maps the structure of a header to the fields in this message. + // + // .. 
code:: + // + // <0> <1> <-- index + // X-Header: a=b;c=d + // | || | + // | || \----> + // | || + // | |\----> + // | | + // | \----> + // | + // \----> + // + // Each 'a=b' key-value pair constitutes an 'element' of the header field. + message HeaderValueExtractor { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." + "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor"; + + // Specifies a header field's key value pair to match on. + message KvElement { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." + "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement"; + + // The separator between key and value (e.g., '=' separates 'k=v;...'). + // If an element is an empty string, the element is ignored. + // If an element contains no separator, the whole element is parsed as key and the + // fragment value is an empty string. + // If there are multiple values for a matched key, the first value is returned. + string separator = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The key to match on. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. + string element_separator = 2; + + oneof extract_type { + // Specifies the zero based index of the element to extract. + // Note Envoy concatenates multiple values of the same header key into a comma separated + // string, the splitting always happens after the concatenation. 
+ uint32 index = 3; + + // Specifies the key value pair to extract the value from. + KvElement element = 4; + } + } + + oneof type { + option (validate.required) = true; + + // Specifies how a header field's value should be extracted. + HeaderValueExtractor header_value_extractor = 1; + } + } + + // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the + // fragments of a :ref:`ScopedRouteConfiguration`. + // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The algorithm to use for constructing a scope key for each request. + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; + + // Configuration source specifier for RDS. + // This config source is used to subscribe to RouteConfiguration resources specified in + // ScopedRouteConfiguration messages. + config.core.v4alpha.ConfigSource rds_config_source = 3 + [(validate.rules).message = {required: true}]; + + oneof config_specifier { + option (validate.required) = true; + + // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by + // matching a key constructed from the request's attributes according to the algorithm specified + // by the + // :ref:`ScopeKeyBuilder` + // in this message. + ScopedRouteConfigurationsList scoped_route_configurations_list = 4; + + // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS + // API. A scope is assigned to a request by matching a key constructed from the request's + // attributes according to the algorithm specified by the + // :ref:`ScopeKeyBuilder` + // in this message. 
+ ScopedRds scoped_rds = 5; + } +} + +message ScopedRds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds"; + + // Configuration source specifier for scoped RDS. + config.core.v4alpha.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message = {required: true}]; +} + +message HttpFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +message RequestIDExtension { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension"; + + // Request ID extension specific configuration. 
+ google.protobuf.Any typed_config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto index 145866a4cb49..497e688f4c3d 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.network.kafka_broker.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3"; option java_outer_classname = "KafkaBrokerProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto index 6eb3e141b6e9..027bc0e3fc98 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto @@ -5,13 +5,14 @@ package envoy.extensions.filters.network.local_ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/token_bucket.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3"; option java_outer_classname = "LocalRateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Local rate limit] // Local rate limit :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto index d6faf97b112f..7bd17600d145 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.mongo_proxy.v3; import "envoy/extensions/filters/common/fault/v3/fault.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3"; option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto index 08058740d609..663449b27035 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.filters.network.mysql_proxy.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3"; option java_outer_classname = "MysqlProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: MySQL proxy] // MySQL Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto index ef88a4eefc17..b92d3cee2541 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto @@ -7,13 +7,14 @@ import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto index e5e0022a9230..e62f7b4c419e 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.rbac.v3; import "envoy/config/rbac/v3/rbac.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD new file mode 100644 index 000000000000..25620c85c513 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/rbac/v4alpha:pkg", + "//envoy/extensions/filters/network/rbac/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto new file mode 100644 index 000000000000..8452a89822c1 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rbac.v4alpha; + +import "envoy/config/rbac/v4alpha/rbac.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha"; +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RBAC] +// Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.network.rbac] + +// RBAC network filter config. +// +// Header should not be used in rules/shadow_rules in RBAC network filter as +// this information is only available in :ref:`RBAC http filter `. 
+message RBAC { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rbac.v3.RBAC"; + + enum EnforcementType { + // Apply RBAC policies when the first byte of data arrives on the connection. + ONE_TIME_ON_FIRST_BYTE = 0; + + // Continuously apply RBAC policies as data arrives. Use this mode when + // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, + // etc. when the protocol decoders emit dynamic metadata such as the + // resources being accessed and the operations on the resources. + CONTINUOUS = 1; + } + + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + config.rbac.v4alpha.RBAC rules = 1; + + // Shadow rules are not enforced by the filter but will emit stats and logs + // and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + config.rbac.v4alpha.RBAC shadow_rules = 2; + + // The prefix to use when emitting statistics. + string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // RBAC enforcement strategy. By default RBAC will be enforced only once + // when the first byte of data arrives from the downstream. When used in + // conjunction with filters that emit dynamic metadata after decoding + // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to + // CONTINUOUS to enforce RBAC policies on every message boundary. 
+ EnforcementType enforcement_type = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 4232a45e07c2..60ab28cfcf1f 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -7,15 +7,16 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - -import "envoy/annotations/deprecation.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3"; option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration overview `. @@ -174,34 +175,15 @@ message RedisProxy { // Optional catch-all route to forward commands that doesn't match any of the routes. The // catch-all route becomes required when no routes are specified. - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. + Route catch_all_route = 4; + string hidden_envoy_deprecated_catch_all_cluster = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. - Route catch_all_route = 4; } // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - // Name of cluster from cluster manager. 
See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string hidden_envoy_deprecated_cluster = 2 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; @@ -247,6 +229,9 @@ message RedisProxy { // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + + string hidden_envoy_deprecated_cluster = 2 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto b/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto index fb39f261f5ad..3d6f0ee234ab 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.filters.network.sni_cluster.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3"; option java_outer_classname = "SniClusterProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SNI Cluster Filter] // Set the upstream cluster name from the SNI field in the TLS connection. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index 884d83f41769..6024a6d552bc 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -10,13 +10,14 @@ import "envoy/type/v3/hash_policy.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3"; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. @@ -27,68 +28,6 @@ message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; - // [#not-implemented-hide:] Deprecated. - // TCP Proxy filter configuration using V1 format. - message DeprecatedV1 { - option deprecated = true; - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1"; - - // A TCP proxy route consists of a set of optional L4 criteria and the - // name of a cluster. If a downstream connection matches all the - // specified criteria, the cluster in the route is used for the - // corresponding upstream connection. Routes are tried in the order - // specified until a match is found. If no match is found, the connection - // is closed. A route with no criteria is valid and always produces a - // match. 
- // [#next-free-field: 6] - message TCPRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1.TCPRoute"; - - // The cluster to connect to when a the downstream network connection - // matches the specified criteria. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An optional list of IP address subnets in the form - // “ip_address/xxâ€. The criteria is satisfied if the destination IP - // address of the downstream connection is contained in at least one of - // the specified subnets. If the parameter is not specified or the list - // is empty, the destination IP address is ignored. The destination IP - // address of the downstream connection might be different from the - // addresses on which the proxy is listening if the connection has been - // redirected. - repeated config.core.v3.CidrRange destination_ip_list = 2; - - // An optional string containing a comma-separated list of port numbers - // or ranges. The criteria is satisfied if the destination port of the - // downstream connection is contained in at least one of the specified - // ranges. If the parameter is not specified, the destination port is - // ignored. The destination port address of the downstream connection - // might be different from the port on which the proxy is listening if - // the connection has been redirected. - string destination_ports = 3; - - // An optional list of IP address subnets in the form - // “ip_address/xxâ€. The criteria is satisfied if the source IP address - // of the downstream connection is contained in at least one of the - // specified subnets. If the parameter is not specified or the list is - // empty, the source IP address is ignored. - repeated config.core.v3.CidrRange source_ip_list = 4; - - // An optional string containing a comma-separated list of port numbers - // or ranges. 
The criteria is satisfied if the source port of the - // downstream connection is contained in at least one of the specified - // ranges. If the parameter is not specified, the source port is - // ignored. - string source_ports = 5; - } - - // The route table for the filter. All filter instances must have a route - // table, even if it is empty. - repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; - } - // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. @@ -132,26 +71,46 @@ message TcpProxy { string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; } + message DeprecatedV1 { + option deprecated = true; + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1"; + + // [#next-free-field: 6] + message TCPRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1.TCPRoute"; + + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + repeated config.core.v3.CidrRange destination_ip_list = 2; + + string destination_ports = 3; + + repeated config.core.v3.CidrRange source_ip_list = 4; + + string source_ports = 5; + } + + repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; + } + // The prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; + // The upstream cluster to connect to. + config.core.v3.Metadata metadata_match = 9; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. 
- WeightedCluster weighted_clusters = 10; - } + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + google.protobuf.Duration idle_timeout = 8; // Optional endpoint metadata match criteria. Only endpoints in the upstream // cluster with metadata matching that set in metadata_match will be // considered. The filter name should be specified as *envoy.lb*. - config.core.v3.Metadata metadata_match = 9; + google.protobuf.Duration downstream_idle_timeout = 3; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout // is defined as the period in which there are no bytes sent or received on either @@ -161,7 +120,7 @@ message TcpProxy { // .. warning:: // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. - google.protobuf.Duration idle_timeout = 8; + google.protobuf.Duration upstream_idle_timeout = 4; // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy // filter. The idle timeout is defined as the period in which there is no @@ -169,30 +128,33 @@ message TcpProxy { // is reached the connection will be closed. The distinction between // downstream_idle_timeout/upstream_idle_timeout provides a means to set // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; + repeated config.accesslog.v3.AccessLog access_log = 5; // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; // Configuration for :ref:`access logs ` // emitted by the this tcp_proxy. - repeated config.accesslog.v3.AccessLog access_log = 5; - - // [#not-implemented-hide:] Deprecated. 
- DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; // The maximum number of unsuccessful connection attempts that will be made before // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + TunnelingConfig tunneling_config = 12; // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; - // [#not-implemented-hide:] feature in progress - // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP - // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload - // will be proxied upstream as per usual. - TunnelingConfig tunneling_config = 12; + oneof cluster_specifier { + option (validate.required) = true; + + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. 
+ string cluster = 2; + + WeightedCluster weighted_clusters = 10; + } } diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto index 130156664942..4fc3289ae33d 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto @@ -6,13 +6,14 @@ import "envoy/config/ratelimit/v3/rls.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 1d4034a3a2aa..3eeae0cba594 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -7,13 +7,14 @@ import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. @@ -45,39 +46,39 @@ message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch"; + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. + bool invert = 3; + + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. + repeated config.route.v3.HeaderMatcher headers = 4; + oneof match_specifier { option (validate.required) = true; - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. + // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. 
note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. string method_name = 1; - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). Note that this only applies for Thrift transports and/or + // protocols that support headers. string service_name = 2; } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. 
- repeated config.route.v3.HeaderMatcher headers = 4; } // [#next-free-field: 7] @@ -85,42 +86,42 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; + // Indicates a single upstream cluster to which the request should be routed + // to. + config.core.v3.Metadata metadata_match = 3; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + repeated config.route.v3.RateLimit rate_limits = 4; + + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. + bool strip_service_name = 5; + oneof cluster_specifier { option (validate.required) = true; - // Indicates a single upstream cluster to which the request should be routed - // to. + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. 
Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". WeightedCluster weighted_clusters = 2; - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. + // Strip the service prefix from the method name, if there's a prefix. For + // example, the method call Service:method would end up being just method. string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; } - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - config.core.v3.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated config.route.v3.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. 
- bool strip_service_name = 5; } // Allows for specification of multiple upstream clusters along with weights that indicate the diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto index 16e3ce8e96b8..cc8e89439420 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto @@ -7,13 +7,14 @@ import "envoy/extensions/filters/network/thrift_proxy/v3/route.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. @@ -99,9 +100,9 @@ message ThriftFilter { // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. 
oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; - google.protobuf.Any typed_config = 3; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD index 1c9ea8688661..31c49afb4c5c 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD @@ -6,8 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/network/wasm/v2:pkg", - "//envoy/extensions/wasm/v3:pkg", + "//envoy/config/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto index 06f9923ea5a6..ec13bc7bee48 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -2,23 +2,21 @@ syntax = "proto3"; package envoy.extensions.filters.network.wasm.v3; -import "envoy/extensions/wasm/v3/wasm.proto"; +import "envoy/config/wasm/v3/wasm.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] // Wasm :ref:`configuration overview `. message Wasm { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.wasm.v2.Wasm"; - // General Plugin configuration. 
- envoy.extensions.wasm.v3.PluginConfig config = 1; + config.wasm.v3.PluginConfig config = 1; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto index 863c43eeb698..a90f777d79ec 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto @@ -4,13 +4,14 @@ package envoy.extensions.filters.network.zookeeper_proxy.v3; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3"; option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ZooKeeper proxy] // ZooKeeper Proxy :ref:`configuration overview `. 
diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto b/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto index 8d808c30e5d9..fb7adf440288 100644 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto +++ b/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto @@ -4,11 +4,13 @@ package envoy.extensions.retry.host.omit_host_metadata.v3; import "envoy/config/core/v3/base.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3"; option java_outer_classname = "OmitHostMetadataConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Omit host metadata retry predicate] diff --git a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto b/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto index 7d1edfbc73fc..b6a4bbecbae8 100644 --- a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto +++ b/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.retry.priority.previous_priorities.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3"; option java_outer_classname = "PreviousPrioritiesConfigProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // 
[#protodoc-title: Previous priorities retry selector] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto index f04869bc9245..6c001be1c746 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.alts.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.alts.v3"; option java_outer_classname = "AltsProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ALTS] // [#extension: envoy.transport_sockets.alts] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto b/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto index 369e34c7b95a..85406c1f7713 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.raw_buffer.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3"; option java_outer_classname = "RawBufferProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Raw Buffer] // [#extension: envoy.transport_sockets.raw_buffer] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto 
b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto index 36fd4dee4a89..ef61575f67f7 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto @@ -5,13 +5,14 @@ package envoy.extensions.transport_sockets.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/tap/v3/common.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD new file mode 100644 index 000000000000..76600e3dd208 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/common/tap/v4alpha:pkg", + "//envoy/extensions/transport_sockets/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto new file mode 100644 index 000000000000..5e0efc403ab5 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/extensions/common/tap/v4alpha/common.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap] +// [#extension: envoy.transport_sockets.tap] + +// Configuration for tap transport socket. This wraps another transport socket, providing the +// ability to interpose and record in plain text any traffic that is surfaced to Envoy. +message Tap { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tap.v3.Tap"; + + // Common configuration for the tap transport socket. + common.tap.v4alpha.CommonExtensionConfig common_config = 1 + [(validate.rules).message = {required: true}]; + + // The underlying transport socket being wrapped. 
+ config.core.v4alpha.TransportSocket transport_socket = 2 + [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto index c06c39d7c51a..4121297ec1c3 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -12,13 +12,14 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common TLS configuration] @@ -121,10 +122,10 @@ message PrivateKeyProvider { // Private key method provider specific configuration. oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; } } @@ -288,16 +289,6 @@ message CertificateValidationContext { repeated string verify_certificate_hash = 2 [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - // An optional list of Subject Alternative Names. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. 
- repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true]; - // An optional list of Subject Alternative name matchers. Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. // @@ -337,6 +328,8 @@ message CertificateValidationContext { // Certificate trust chain verification mode. TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; + + repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true]; } // TLS context shared by both client and server TLS contexts. @@ -374,12 +367,12 @@ message CommonTlsContext { repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; + // How to validate peer certificates. + repeated string alpn_protocols = 4; + oneof validation_context_type { // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; + CertificateValidationContext validation_context = 3; // Combined certificate validation context holds a default CertificateValidationContext // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic @@ -388,20 +381,20 @@ message CommonTlsContext { // CertificateValidationContext overwrites singular fields in default // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Supplies the list of ALPN protocols that the listener should expose. 
In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. CombinedCertificateValidationContext combined_validation_context = 8; } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; } message UpstreamTlsContext { @@ -434,7 +427,7 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.DownstreamTlsContext"; @@ -450,22 +443,32 @@ message DownstreamTlsContext { // [#not-implemented-hide:] google.protobuf.BoolValue require_sni = 3; + // TLS session ticket key settings. + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; + oneof session_ticket_keys_type { - // TLS session ticket key settings. + // Config for fetching TLS session ticket keys via SDS API. TlsSessionTicketKeys session_ticket_keys = 4; - // Config for fetching TLS session ticket keys via SDS API. 
+ // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - } - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + bool disable_stateless_session_resumption = 7; + } } message GenericSecret { diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD new file mode 100644 index 000000000000..e56544584bfe --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto new file mode 100644 index 000000000000..febb6d665240 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto @@ -0,0 +1,518 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "CertProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. 
By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. 
code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider"; + + reserved 2; + + reserved "config"; + + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.TlsCertificate"; + + // The TLS certificate chain. + config.core.v4alpha.DataSource certificate_chain = 1; + + // The TLS private key. + config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. 
+ config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + config.core.v4alpha.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. 
To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v4alpha.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + reserved 4; + + reserved "verify_subject_alt_name"; + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. 
+ config.core.v4alpha.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. 
If this DataSource contains + // multiple CRLs, all of them will be used. + config.core.v4alpha.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." + "CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. 
+ CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. 
attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. 
+ // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + config.core.v4alpha.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/service/accesslog/v2/BUILD b/generated_api_shadow/envoy/service/accesslog/v2/BUILD index b25083a6222f..dbaf26b180f4 100644 --- a/generated_api_shadow/envoy/service/accesslog/v2/BUILD +++ b/generated_api_shadow/envoy/service/accesslog/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/data/accesslog/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/accesslog/v2/als.proto b/generated_api_shadow/envoy/service/accesslog/v2/als.proto index 69618be73011..bbd871ff83a4 100644 --- a/generated_api_shadow/envoy/service/accesslog/v2/als.proto +++ b/generated_api_shadow/envoy/service/accesslog/v2/als.proto @@ -5,12 +5,14 @@ package envoy.service.accesslog.v2; import "envoy/api/v2/core/base.proto"; import "envoy/data/accesslog/v2/accesslog.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.accesslog.v2"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/generated_api_shadow/envoy/service/accesslog/v3/als.proto b/generated_api_shadow/envoy/service/accesslog/v3/als.proto index 9749d1a9b96b..3f5e37325cc5 100644 --- a/generated_api_shadow/envoy/service/accesslog/v3/als.proto +++ b/generated_api_shadow/envoy/service/accesslog/v3/als.proto @@ -5,14 +5,15 @@ package envoy.service.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/accesslog/v3/accesslog.proto"; +import "udpa/annotations/status.proto"; import 
"udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.accesslog.v3"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Access Log Service (ALS)] diff --git a/generated_api_shadow/envoy/service/auth/v2/BUILD b/generated_api_shadow/envoy/service/auth/v2/BUILD index 80ff3abc0c3b..b23b27a8aac9 100644 --- a/generated_api_shadow/envoy/service/auth/v2/BUILD +++ b/generated_api_shadow/envoy/service/auth/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto index d918c74440a6..16ac3ee23d49 100644 --- a/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto +++ b/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto @@ -7,9 +7,12 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Attribute Context ] diff --git a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto b/generated_api_shadow/envoy/service/auth/v2/external_auth.proto index 8245106334b7..0f580fe7dc34 100644 --- a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v2/external_auth.proto @@ -8,12 +8,14 @@ import "envoy/type/http_status.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/status.proto"; import 
"validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Authorization Service ] diff --git a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto index 5e4e63933bfd..3c4fe0af665e 100644 --- a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto +++ b/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto @@ -7,11 +7,13 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/timestamp.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Attribute Context ] diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto index d77ae9e03607..b93b61a3bde9 100644 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto @@ -8,14 +8,15 @@ import "envoy/type/v3/http_status.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Authorization Service ] diff --git 
a/generated_api_shadow/envoy/service/cluster/v3/cds.proto b/generated_api_shadow/envoy/service/cluster/v3/cds.proto index c0b9c4ace7ce..100ecad39a96 100644 --- a/generated_api_shadow/envoy/service/cluster/v3/cds.proto +++ b/generated_api_shadow/envoy/service/cluster/v3/cds.proto @@ -6,14 +6,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.cluster.v3"; option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: CDS] diff --git a/generated_api_shadow/envoy/service/discovery/v2/ads.proto b/generated_api_shadow/envoy/service/discovery/v2/ads.proto index 01759e5f1b36..d70e0cdc8e14 100644 --- a/generated_api_shadow/envoy/service/discovery/v2/ads.proto +++ b/generated_api_shadow/envoy/service/discovery/v2/ads.proto @@ -4,10 +4,13 @@ package envoy.service.discovery.v2; import "envoy/api/v2/discovery.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Aggregated Discovery Service (ADS)] diff --git a/generated_api_shadow/envoy/service/discovery/v2/hds.proto b/generated_api_shadow/envoy/service/discovery/v2/hds.proto index a0211685d28a..76f91c5a456d 100644 --- a/generated_api_shadow/envoy/service/discovery/v2/hds.proto +++ b/generated_api_shadow/envoy/service/discovery/v2/hds.proto @@ -10,12 +10,14 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import 
"udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "HdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.health.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health Discovery Service (HDS)] diff --git a/generated_api_shadow/envoy/service/discovery/v2/rtds.proto b/generated_api_shadow/envoy/service/discovery/v2/rtds.proto index e12ceab635bb..713ac277072b 100644 --- a/generated_api_shadow/envoy/service/discovery/v2/rtds.proto +++ b/generated_api_shadow/envoy/service/discovery/v2/rtds.proto @@ -9,6 +9,7 @@ import "google/protobuf/struct.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; @@ -16,6 +17,7 @@ option java_outer_classname = "RtdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.runtime.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` diff --git a/generated_api_shadow/envoy/service/discovery/v2/sds.proto b/generated_api_shadow/envoy/service/discovery/v2/sds.proto index 6a131ad413bd..4d01d475c59b 100644 --- a/generated_api_shadow/envoy/service/discovery/v2/sds.proto +++ b/generated_api_shadow/envoy/service/discovery/v2/sds.proto @@ -8,12 +8,14 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option 
java_outer_classname = "SdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.secret.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Secret Discovery Service (SDS)] diff --git a/generated_api_shadow/envoy/service/discovery/v3/ads.proto b/generated_api_shadow/envoy/service/discovery/v3/ads.proto index beaae93b91fc..03021559ab66 100644 --- a/generated_api_shadow/envoy/service/discovery/v3/ads.proto +++ b/generated_api_shadow/envoy/service/discovery/v3/ads.proto @@ -4,12 +4,14 @@ package envoy.service.discovery.v3; import "envoy/service/discovery/v3/discovery.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v3"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Aggregated Discovery Service (ADS)] diff --git a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto index 02997a51dca2..b8e31160a88b 100644 --- a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto +++ b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto @@ -7,11 +7,13 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v3"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common discovery API components] diff --git a/generated_api_shadow/envoy/service/endpoint/v3/eds.proto 
b/generated_api_shadow/envoy/service/endpoint/v3/eds.proto index ab2ec3271828..e1a8494afc8f 100644 --- a/generated_api_shadow/envoy/service/endpoint/v3/eds.proto +++ b/generated_api_shadow/envoy/service/endpoint/v3/eds.proto @@ -8,15 +8,16 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` diff --git a/api/envoy/config/filter/http/wasm/v2/BUILD b/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD similarity index 81% rename from api/envoy/config/filter/http/wasm/v2/BUILD rename to generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD index 7903b3becced..6db6b085b4e4 100644 --- a/api/envoy/config/filter/http/wasm/v2/BUILD +++ b/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD @@ -5,8 +5,9 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( + has_services = True, deps = [ - "//envoy/config/wasm/v2:pkg", + "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto new file mode 100644 index 000000000000..8d07f04640ca --- /dev/null +++ b/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto @@ -0,0 +1,62 @@ +syntax = 
"proto3"; + +package envoy.service.event_reporting.v2alpha; + +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.event_reporting.v2alpha"; +option java_outer_classname = "EventReportingServiceProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.event_reporting.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: gRPC Event Reporting Service] + +// [#not-implemented-hide:] +// Service for streaming different types of events from Envoy to a server. The examples of +// such events may be health check or outlier detection events. +service EventReportingService { + // Envoy will connect and send StreamEventsRequest messages forever. + // The management server may send StreamEventsResponse to configure event stream. See below. + // This API is designed for high throughput with the expectation that it might be lossy. + rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { + } +} + +// [#not-implemented-hide:] +// An events envoy sends to the management server. +message StreamEventsRequest { + message Identifier { + // The node sending the event messages over the stream. + api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; + } + + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + + // Batch of events. When the stream is already active, it will be the events occurred + // since the last message had been sent. If the server receives unknown event type, it should + // silently ignore it. 
+ // + // The following events are supported: + // + // * :ref:`HealthCheckEvent ` + // * :ref:`OutlierDetectionEvent ` + repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// [#not-implemented-hide:] +// The management server may send envoy a StreamEventsResponse to tell which events the server +// is interested in. In future, with aggregated event reporting service, this message will +// contain, for example, clusters the envoy should send events for, or event types the server +// wants to process. +message StreamEventsResponse { +} diff --git a/generated_api_shadow/envoy/service/event_reporting/v3/BUILD b/generated_api_shadow/envoy/service/event_reporting/v3/BUILD new file mode 100644 index 000000000000..99d01d89f712 --- /dev/null +++ b/generated_api_shadow/envoy/service/event_reporting/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/service/event_reporting/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto new file mode 100644 index 000000000000..6f0b325902fb --- /dev/null +++ b/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; + +package envoy.service.event_reporting.v3; + +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.event_reporting.v3"; +option java_outer_classname = "EventReportingServiceProto"; 
+option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: gRPC Event Reporting Service] + +// [#not-implemented-hide:] +// Service for streaming different types of events from Envoy to a server. The examples of +// such events may be health check or outlier detection events. +service EventReportingService { + // Envoy will connect and send StreamEventsRequest messages forever. + // The management server may send StreamEventsResponse to configure event stream. See below. + // This API is designed for high throughput with the expectation that it might be lossy. + rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { + } +} + +// [#not-implemented-hide:] +// An events envoy sends to the management server. +message StreamEventsRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.event_reporting.v2alpha.StreamEventsRequest"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier"; + + // The node sending the event messages over the stream. + config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; + } + + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + + // Batch of events. When the stream is already active, it will be the events occurred + // since the last message had been sent. If the server receives unknown event type, it should + // silently ignore it. 
+ // + // The following events are supported: + // + // * :ref:`HealthCheckEvent ` + // * :ref:`OutlierDetectionEvent ` + repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// [#not-implemented-hide:] +// The management server may send envoy a StreamEventsResponse to tell which events the server +// is interested in. In future, with aggregated event reporting service, this message will +// contain, for example, clusters the envoy should send events for, or event types the server +// wants to process. +message StreamEventsResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.event_reporting.v2alpha.StreamEventsResponse"; +} diff --git a/generated_api_shadow/envoy/service/health/v3/hds.proto b/generated_api_shadow/envoy/service/health/v3/hds.proto index af126ced495c..0b09134709c8 100644 --- a/generated_api_shadow/envoy/service/health/v3/hds.proto +++ b/generated_api_shadow/envoy/service/health/v3/hds.proto @@ -9,12 +9,14 @@ import "envoy/config/endpoint/v3/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.health.v3"; option java_outer_classname = "HdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health Discovery Service (HDS)] diff --git a/generated_api_shadow/envoy/service/listener/v3/lds.proto b/generated_api_shadow/envoy/service/listener/v3/lds.proto index 0a1b6b23564d..a7a8260619f7 100644 --- a/generated_api_shadow/envoy/service/listener/v3/lds.proto +++ b/generated_api_shadow/envoy/service/listener/v3/lds.proto @@ -8,15 +8,16 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import 
"udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.listener.v3"; option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener] // Listener :ref:`configuration overview ` diff --git a/generated_api_shadow/envoy/service/load_stats/v2/BUILD b/generated_api_shadow/envoy/service/load_stats/v2/BUILD index e58fe9bd9a3f..504602b339ac 100644 --- a/generated_api_shadow/envoy/service/load_stats/v2/BUILD +++ b/generated_api_shadow/envoy/service/load_stats/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/endpoint:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto index a82d703de8c3..a71039e7ceeb 100644 --- a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto +++ b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto @@ -7,12 +7,14 @@ import "envoy/api/v2/endpoint/load_report.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Load reporting service] diff --git a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto index 370a8a5925b9..ce48574826a9 100644 --- a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto +++ 
b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto @@ -7,14 +7,15 @@ import "envoy/config/endpoint/v3/load_report.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.load_stats.v3"; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Load reporting service] diff --git a/generated_api_shadow/envoy/service/metrics/v2/BUILD b/generated_api_shadow/envoy/service/metrics/v2/BUILD index be8920efa9a0..824992f46200 100644 --- a/generated_api_shadow/envoy/service/metrics/v2/BUILD +++ b/generated_api_shadow/envoy/service/metrics/v2/BUILD @@ -8,6 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@prometheus_metrics_model//:client_model", ], ) diff --git a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto index 5c9a039ccd08..aa5e70385015 100644 --- a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto +++ b/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/base.proto"; import "metrics.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v2"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics service] diff --git a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto 
b/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto index 16b4279ef393..033c168c32ba 100644 --- a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto @@ -5,14 +5,16 @@ package envoy.service.metrics.v3; import "envoy/config/core/v3/base.proto"; import "metrics.proto"; -import "udpa/annotations/versioning.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v3"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics service] diff --git a/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto index 5d9b35e0c9ff..6d97718b4b32 100644 --- a/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto +++ b/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto @@ -6,12 +6,14 @@ import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/ratelimit/ratelimit.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate Limit Service (RLS)] @@ -75,6 +77,9 @@ message RateLimitResponse { DAY = 4; } + // A name or description of this limit. + string name = 3; + // The number of requests per unit of time. 
uint32 requests_per_unit = 1; diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto index 7d9fd93ba83b..4aad42fcaa81 100644 --- a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto +++ b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto @@ -5,14 +5,15 @@ package envoy.service.ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.ratelimit.v3"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate Limit Service (RLS)] @@ -85,6 +86,9 @@ message RateLimitResponse { DAY = 4; } + // A name or description of this limit. + string name = 3; + // The number of requests per unit of time. 
uint32 requests_per_unit = 1; diff --git a/generated_api_shadow/envoy/service/route/v3/rds.proto b/generated_api_shadow/envoy/service/route/v3/rds.proto index 3514ebd2a2c7..3a2c432fd8b2 100644 --- a/generated_api_shadow/envoy/service/route/v3/rds.proto +++ b/generated_api_shadow/envoy/service/route/v3/rds.proto @@ -7,15 +7,16 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.route.v3"; option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RDS] diff --git a/generated_api_shadow/envoy/service/route/v3/srds.proto b/generated_api_shadow/envoy/service/route/v3/srds.proto index db8ecbe4eb18..7a7f8f7d3a3f 100644 --- a/generated_api_shadow/envoy/service/route/v3/srds.proto +++ b/generated_api_shadow/envoy/service/route/v3/srds.proto @@ -6,14 +6,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.route.v3"; option java_outer_classname = "SrdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SRDS] // * Routing :ref:`architecture overview ` diff --git a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto b/generated_api_shadow/envoy/service/runtime/v3/rtds.proto index 
69c77f2a4937..b12844233883 100644 --- a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto +++ b/generated_api_shadow/envoy/service/runtime/v3/rtds.proto @@ -7,15 +7,16 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.runtime.v3"; option java_outer_classname = "RtdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` diff --git a/generated_api_shadow/envoy/service/secret/v3/sds.proto b/generated_api_shadow/envoy/service/secret/v3/sds.proto index e541ca9882b3..3c9441d7c760 100644 --- a/generated_api_shadow/envoy/service/secret/v3/sds.proto +++ b/generated_api_shadow/envoy/service/secret/v3/sds.proto @@ -6,14 +6,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.secret.v3"; option java_outer_classname = "SdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Secret Discovery Service (SDS)] diff --git a/generated_api_shadow/envoy/service/status/v2/BUILD b/generated_api_shadow/envoy/service/status/v2/BUILD index c3d204fd52a1..6e2c33fd2827 100644 --- a/generated_api_shadow/envoy/service/status/v2/BUILD +++ 
b/generated_api_shadow/envoy/service/status/v2/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/admin/v2alpha:pkg", "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/status/v2/csds.proto b/generated_api_shadow/envoy/service/status/v2/csds.proto index 764c95b01b6a..2233f3cef771 100644 --- a/generated_api_shadow/envoy/service/status/v2/csds.proto +++ b/generated_api_shadow/envoy/service/status/v2/csds.proto @@ -9,10 +9,13 @@ import "envoy/type/matcher/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.service.status.v2"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Client Status Discovery Service (CSDS)] diff --git a/generated_api_shadow/envoy/service/status/v3/csds.proto b/generated_api_shadow/envoy/service/status/v3/csds.proto index 72832b4ad4b7..3347def21d8f 100644 --- a/generated_api_shadow/envoy/service/status/v3/csds.proto +++ b/generated_api_shadow/envoy/service/status/v3/csds.proto @@ -9,12 +9,14 @@ import "envoy/type/matcher/v3/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.status.v3"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client Status Discovery Service (CSDS)] diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/common.proto b/generated_api_shadow/envoy/service/tap/v2alpha/common.proto index 
f29400504bbd..990a3826481b 100644 --- a/generated_api_shadow/envoy/service/tap/v2alpha/common.proto +++ b/generated_api_shadow/envoy/service/tap/v2alpha/common.proto @@ -9,12 +9,14 @@ import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.tap.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common tap configuration] diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto index c0d25a1b57e5..9fd18eae5d36 100644 --- a/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto +++ b/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto @@ -5,12 +5,14 @@ package envoy.service.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/data/tap/v2alpha/wrapper.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap Sink Service] diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto b/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto index 6ef1288d1319..81b9cb0e447b 100644 --- a/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto +++ b/generated_api_shadow/envoy/service/tap/v2alpha/tapds.proto @@ -7,12 +7,14 @@ import "envoy/service/tap/v2alpha/common.proto"; import "google/api/annotations.proto"; +import "udpa/annotations/status.proto"; import 
"validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "TapdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap discovery service] diff --git a/generated_api_shadow/envoy/service/tap/v3/tap.proto b/generated_api_shadow/envoy/service/tap/v3/tap.proto index bf269e388024..080aba215c10 100644 --- a/generated_api_shadow/envoy/service/tap/v3/tap.proto +++ b/generated_api_shadow/envoy/service/tap/v3/tap.proto @@ -5,14 +5,15 @@ package envoy.service.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/tap/v3/wrapper.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap Sink Service] diff --git a/generated_api_shadow/envoy/service/tap/v3/tapds.proto b/generated_api_shadow/envoy/service/tap/v3/tapds.proto index 80e550e9b6af..51393d6e14c7 100644 --- a/generated_api_shadow/envoy/service/tap/v3/tapds.proto +++ b/generated_api_shadow/envoy/service/tap/v3/tapds.proto @@ -7,14 +7,15 @@ import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v3"; option java_outer_classname = "TapdsProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap discovery service] diff --git 
a/generated_api_shadow/envoy/service/trace/v2/BUILD b/generated_api_shadow/envoy/service/trace/v2/BUILD index 6fce6d2d917a..dec3717aa573 100644 --- a/generated_api_shadow/envoy/service/trace/v2/BUILD +++ b/generated_api_shadow/envoy/service/trace/v2/BUILD @@ -8,6 +8,7 @@ api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) diff --git a/generated_api_shadow/envoy/service/trace/v2/trace_service.proto b/generated_api_shadow/envoy/service/trace/v2/trace_service.proto index 81449dab8675..48e65820b387 100644 --- a/generated_api_shadow/envoy/service/trace/v2/trace_service.proto +++ b/generated_api_shadow/envoy/service/trace/v2/trace_service.proto @@ -8,12 +8,14 @@ import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.trace.v2"; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Trace service] diff --git a/generated_api_shadow/envoy/service/trace/v3/trace_service.proto b/generated_api_shadow/envoy/service/trace/v3/trace_service.proto index b33d3af75398..facaa9211c92 100644 --- a/generated_api_shadow/envoy/service/trace/v3/trace_service.proto +++ b/generated_api_shadow/envoy/service/trace/v3/trace_service.proto @@ -7,14 +7,16 @@ import "envoy/config/core/v3/base.proto"; import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; -import "udpa/annotations/versioning.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.trace.v3"; option java_outer_classname = 
"TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Trace service] diff --git a/generated_api_shadow/envoy/type/BUILD b/generated_api_shadow/envoy/type/BUILD index 5dc095ade27a..ef3541ebcb1d 100644 --- a/generated_api_shadow/envoy/type/BUILD +++ b/generated_api_shadow/envoy/type/BUILD @@ -4,4 +4,6 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -api_proto_package() +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/type/hash_policy.proto b/generated_api_shadow/envoy/type/hash_policy.proto index 1e13e60b286b..b6aeb31fcbfd 100644 --- a/generated_api_shadow/envoy/type/hash_policy.proto +++ b/generated_api_shadow/envoy/type/hash_policy.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HashPolicyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Hash Policy] diff --git a/generated_api_shadow/envoy/type/http.proto b/generated_api_shadow/envoy/type/http.proto index 12160c6354a9..c1c787411fad 100644 --- a/generated_api_shadow/envoy/type/http.proto +++ b/generated_api_shadow/envoy/type/http.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP] diff --git a/generated_api_shadow/envoy/type/http_status.proto b/generated_api_shadow/envoy/type/http_status.proto index e81c4f9d11dc..99b44a98c251 100644 
--- a/generated_api_shadow/envoy/type/http_status.proto +++ b/generated_api_shadow/envoy/type/http_status.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HttpStatusProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP status codes] diff --git a/generated_api_shadow/envoy/type/matcher/BUILD b/generated_api_shadow/envoy/type/matcher/BUILD index 1f0bfe3335d0..e2a45aba90ec 100644 --- a/generated_api_shadow/envoy/type/matcher/BUILD +++ b/generated_api_shadow/envoy/type/matcher/BUILD @@ -8,5 +8,6 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/type/matcher/metadata.proto b/generated_api_shadow/envoy/type/matcher/metadata.proto index 43dd5b7ad139..2cbc602564c5 100644 --- a/generated_api_shadow/envoy/type/matcher/metadata.proto +++ b/generated_api_shadow/envoy/type/matcher/metadata.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/matcher/value.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metadata matcher] diff --git a/generated_api_shadow/envoy/type/matcher/node.proto b/generated_api_shadow/envoy/type/matcher/node.proto index 937aeba63086..c9e84a46279a 100644 --- a/generated_api_shadow/envoy/type/matcher/node.proto +++ b/generated_api_shadow/envoy/type/matcher/node.proto @@ -5,9 +5,12 @@ package envoy.type.matcher; import "envoy/type/matcher/string.proto"; import "envoy/type/matcher/struct.proto"; +import 
"udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Node matcher] diff --git a/generated_api_shadow/envoy/type/matcher/number.proto b/generated_api_shadow/envoy/type/matcher/number.proto index 52a6eb6e15ce..e488f16a4a0c 100644 --- a/generated_api_shadow/envoy/type/matcher/number.proto +++ b/generated_api_shadow/envoy/type/matcher/number.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/range.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Number matcher] diff --git a/generated_api_shadow/envoy/type/matcher/path.proto b/generated_api_shadow/envoy/type/matcher/path.proto index 779339a2d260..860a1c69f18a 100644 --- a/generated_api_shadow/envoy/type/matcher/path.proto +++ b/generated_api_shadow/envoy/type/matcher/path.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/matcher/string.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "PathProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Path matcher] diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto index 2be13845fc00..78b4a2c1d61e 100644 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/regex.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "google/protobuf/wrappers.proto"; 
+import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Regex matcher] diff --git a/generated_api_shadow/envoy/type/matcher/string.proto b/generated_api_shadow/envoy/type/matcher/string.proto index 2cbfc2476492..431043e00ec1 100644 --- a/generated_api_shadow/envoy/type/matcher/string.proto +++ b/generated_api_shadow/envoy/type/matcher/string.proto @@ -5,11 +5,13 @@ package envoy.type.matcher; import "envoy/type/matcher/regex.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "StringProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: String matcher] diff --git a/generated_api_shadow/envoy/type/matcher/struct.proto b/generated_api_shadow/envoy/type/matcher/struct.proto index 245d839b21e3..f65b1d121845 100644 --- a/generated_api_shadow/envoy/type/matcher/struct.proto +++ b/generated_api_shadow/envoy/type/matcher/struct.proto @@ -4,11 +4,13 @@ package envoy.type.matcher; import "envoy/type/matcher/value.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "StructProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Struct matcher] diff --git a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto index 94b27a0ba835..65ec4f47ffff 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto +++ 
b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/value.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metadata matcher] diff --git a/generated_api_shadow/envoy/type/matcher/v3/node.proto b/generated_api_shadow/envoy/type/matcher/v3/node.proto index 602ae2e70650..fe507312135f 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/node.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/node.proto @@ -5,11 +5,13 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/matcher/v3/struct.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Node matcher] diff --git a/generated_api_shadow/envoy/type/matcher/v3/number.proto b/generated_api_shadow/envoy/type/matcher/v3/number.proto index c5b722dc57ea..2379efdcbd23 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/number.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/number.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/v3/range.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // 
[#protodoc-title: Number matcher] diff --git a/generated_api_shadow/envoy/type/matcher/v3/path.proto b/generated_api_shadow/envoy/type/matcher/v3/path.proto index 68e0bee83c6e..0ce89871c9d9 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/path.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/path.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/string.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "PathProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Path matcher] diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index acfb905ea01c..1b10df3ff1ba 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Regex matcher] @@ -32,15 +33,15 @@ message RegexMatcher { google.protobuf.UInt32Value max_program_size = 1; } + // Google's RE2 regex engine. + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof engine_type { option (validate.required) = true; - // Google's RE2 regex engine. + // The regex match string. The string must be supported by the configured engine. GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } - - // The regex match string. 
The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/generated_api_shadow/envoy/type/matcher/v3/string.proto b/generated_api_shadow/envoy/type/matcher/v3/string.proto index b6cf40f819c9..2f9d43de40dc 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/string.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/string.proto @@ -4,14 +4,15 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/regex.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "StringProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: String matcher] @@ -20,23 +21,23 @@ option java_multiple_files = true; message StringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + bool ignore_case = 6; + oneof match_pattern { option (validate.required) = true; - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - // The input string must have the prefix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + string exact = 1; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. 
@@ -44,35 +45,22 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // The input string must match the regular expression specified here. - // The regex grammar is defined `here - // `_. - // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; + string hidden_envoy_deprecated_regex = 4 [ deprecated = true, (validate.rules).string = {max_bytes: 1024}, (envoy.annotations.disallowed_by_default) = true ]; - - // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } - - // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no - // effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; } // Specifies a list of ways to match a string. 
diff --git a/generated_api_shadow/envoy/type/matcher/v3/struct.proto b/generated_api_shadow/envoy/type/matcher/v3/struct.proto index 97e214d79e6a..b88d7b11bc2a 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/struct.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/struct.proto @@ -4,13 +4,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/value.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "StructProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Struct matcher] diff --git a/generated_api_shadow/envoy/type/matcher/v3/value.proto b/generated_api_shadow/envoy/type/matcher/v3/value.proto index 6ad8750c5fa0..040332273ba3 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/value.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/value.proto @@ -5,13 +5,14 @@ package envoy.type.matcher.v3; import "envoy/type/matcher/v3/number.proto"; import "envoy/type/matcher/v3/string.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Value matcher] diff --git a/generated_api_shadow/envoy/type/matcher/value.proto b/generated_api_shadow/envoy/type/matcher/value.proto index dda49958904f..aaecd14e8ecd 100644 --- a/generated_api_shadow/envoy/type/matcher/value.proto +++ b/generated_api_shadow/envoy/type/matcher/value.proto @@ -5,11 +5,13 @@ package envoy.type.matcher; import "envoy/type/matcher/number.proto"; import "envoy/type/matcher/string.proto"; +import "udpa/annotations/status.proto"; 
import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Value matcher] diff --git a/generated_api_shadow/envoy/type/metadata/v2/metadata.proto b/generated_api_shadow/envoy/type/metadata/v2/metadata.proto index 67653519ba97..43a1a7ca9275 100644 --- a/generated_api_shadow/envoy/type/metadata/v2/metadata.proto +++ b/generated_api_shadow/envoy/type/metadata/v2/metadata.proto @@ -3,12 +3,14 @@ syntax = "proto3"; package envoy.type.metadata.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.metadata.v2"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.type.metadata.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metadata] diff --git a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto b/generated_api_shadow/envoy/type/metadata/v3/metadata.proto index f8a98d0b5805..ddcce6882057 100644 --- a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto +++ b/generated_api_shadow/envoy/type/metadata/v3/metadata.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.metadata.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.metadata.v3"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metadata] diff --git a/generated_api_shadow/envoy/type/percent.proto b/generated_api_shadow/envoy/type/percent.proto index 3420342dee2f..fc41a26662fe 100644 --- 
a/generated_api_shadow/envoy/type/percent.proto +++ b/generated_api_shadow/envoy/type/percent.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Percent] diff --git a/generated_api_shadow/envoy/type/range.proto b/generated_api_shadow/envoy/type/range.proto index e550ca19bfc3..79aaa81975c3 100644 --- a/generated_api_shadow/envoy/type/range.proto +++ b/generated_api_shadow/envoy/type/range.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Range] diff --git a/generated_api_shadow/envoy/type/semantic_version.proto b/generated_api_shadow/envoy/type/semantic_version.proto index a7dbf7ebd6ef..80fe016bfa16 100644 --- a/generated_api_shadow/envoy/type/semantic_version.proto +++ b/generated_api_shadow/envoy/type/semantic_version.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "SemanticVersionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Semantic Version] diff --git a/generated_api_shadow/envoy/type/token_bucket.proto b/generated_api_shadow/envoy/type/token_bucket.proto index b293b76be192..41b6d268d5f6 100644 --- a/generated_api_shadow/envoy/type/token_bucket.proto +++ b/generated_api_shadow/envoy/type/token_bucket.proto @@ -5,11 +5,13 @@ package envoy.type; import 
"google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "TokenBucketProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Token bucket] diff --git a/generated_api_shadow/envoy/type/tracing/v2/BUILD b/generated_api_shadow/envoy/type/tracing/v2/BUILD index 7088ddfe0dad..34e1b604ce9f 100644 --- a/generated_api_shadow/envoy/type/tracing/v2/BUILD +++ b/generated_api_shadow/envoy/type/tracing/v2/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/type/metadata/v2:pkg"], + deps = [ + "//envoy/type/metadata/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto index 750c07f79943..7506ae886125 100644 --- a/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto +++ b/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto @@ -4,11 +4,13 @@ package envoy.type.tracing.v2; import "envoy/type/metadata/v2/metadata.proto"; +import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.tracing.v2"; option java_outer_classname = "CustomTagProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Custom Tag] diff --git a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto index 9b8d6029e127..42518ead59d1 100644 --- a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto +++ b/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto @@ -4,13 +4,14 @@ package 
envoy.type.tracing.v3; import "envoy/type/metadata/v3/metadata.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.tracing.v3"; option java_outer_classname = "CustomTagProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Custom Tag] diff --git a/generated_api_shadow/envoy/type/v3/hash_policy.proto b/generated_api_shadow/envoy/type/v3/hash_policy.proto index 2a27306b1171..96c39299698f 100644 --- a/generated_api_shadow/envoy/type/v3/hash_policy.proto +++ b/generated_api_shadow/envoy/type/v3/hash_policy.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HashPolicyProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Hash Policy] diff --git a/generated_api_shadow/envoy/type/v3/http.proto b/generated_api_shadow/envoy/type/v3/http.proto index 2018b8a1e76b..fec15d11f871 100644 --- a/generated_api_shadow/envoy/type/v3/http.proto +++ b/generated_api_shadow/envoy/type/v3/http.proto @@ -2,9 +2,12 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; + option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP] diff --git a/generated_api_shadow/envoy/type/v3/http_status.proto b/generated_api_shadow/envoy/type/v3/http_status.proto index ca990b17c75b..8914b7a0264a 100644 --- a/generated_api_shadow/envoy/type/v3/http_status.proto +++ 
b/generated_api_shadow/envoy/type/v3/http_status.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HttpStatusProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP status codes] diff --git a/generated_api_shadow/envoy/type/v3/percent.proto b/generated_api_shadow/envoy/type/v3/percent.proto index 80439bc606b2..3a89a3f44fd5 100644 --- a/generated_api_shadow/envoy/type/v3/percent.proto +++ b/generated_api_shadow/envoy/type/v3/percent.proto @@ -2,13 +2,14 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Percent] diff --git a/generated_api_shadow/envoy/type/v3/range.proto b/generated_api_shadow/envoy/type/v3/range.proto index c0e8348768dc..de1d55b09a21 100644 --- a/generated_api_shadow/envoy/type/v3/range.proto +++ b/generated_api_shadow/envoy/type/v3/range.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Range] diff --git a/generated_api_shadow/envoy/type/v3/semantic_version.proto b/generated_api_shadow/envoy/type/v3/semantic_version.proto index 38f3484ae58b..a4126336f03a 100644 --- 
a/generated_api_shadow/envoy/type/v3/semantic_version.proto +++ b/generated_api_shadow/envoy/type/v3/semantic_version.proto @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.type.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "SemanticVersionProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Semantic Version] diff --git a/generated_api_shadow/envoy/type/v3/token_bucket.proto b/generated_api_shadow/envoy/type/v3/token_bucket.proto index 34296f3ae37f..a96d50fbd0ab 100644 --- a/generated_api_shadow/envoy/type/v3/token_bucket.proto +++ b/generated_api_shadow/envoy/type/v3/token_bucket.proto @@ -5,13 +5,14 @@ package envoy.type.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; - import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "TokenBucketProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Token bucket] diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h index 1c6accb3af8d..9fab9c1cd01b 100644 --- a/include/envoy/api/os_sys_calls.h +++ b/include/envoy/api/os_sys_calls.h @@ -51,6 +51,17 @@ class OsSysCalls { */ virtual SysCallSizeResult recvmsg(os_fd_t sockfd, msghdr* msg, int flags) PURE; + /** + * @see recvmmsg (man 2 recvmmsg) + */ + virtual SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, + int flags, struct timespec* timeout) PURE; + + /** + * return true if the OS supports recvmmsg() and sendmmsg(). + */ + virtual bool supportsMmsg() const PURE; + /** * Release all resources allocated for fd. 
* @return zero on success, -1 returned otherwise. diff --git a/include/envoy/buffer/BUILD b/include/envoy/buffer/BUILD index 2a24c6fa2409..e22d136b17de 100644 --- a/include/envoy/buffer/BUILD +++ b/include/envoy/buffer/BUILD @@ -11,6 +11,9 @@ envoy_package() envoy_cc_library( name = "buffer_interface", hdrs = ["buffer.h"], + external_deps = [ + "abseil_inlined_vector", + ], deps = [ "//include/envoy/api:os_sys_calls_interface", "//include/envoy/network:io_handle_interface", diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 6c6463cb3ff4..61d803c2d204 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -13,7 +13,9 @@ #include "common/common/byte_order.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" +#include "absl/types/optional.h" namespace Envoy { namespace Buffer { @@ -28,6 +30,8 @@ struct RawSlice { bool operator==(const RawSlice& rhs) const { return mem_ == rhs.mem_ && len_ == rhs.len_; } }; +using RawSliceVector = absl::InlinedVector; + /** * A wrapper class to facilitate passing in externally owned data to a buffer via addBufferFragment. * When the buffer no longer needs the data passed in through a fragment, it calls done() on it. @@ -124,14 +128,12 @@ class Instance { virtual void drain(uint64_t size) PURE; /** - * Fetch the raw buffer slices. This routine is optimized for performance. - * @param out supplies an array of RawSlice objects to fill. - * @param out_size supplies the size of out. - * @return the actual number of slices needed, which may be greater than out_size. Passing - * nullptr for out and 0 for out_size will just return the size of the array needed - * to capture all of the slice data. + * Fetch the raw buffer slices. + * @param max_slices supplies an optional limit on the number of slices to fetch, for performance. + * @return RawSliceVector with non-empty slices in the buffer. 
*/ - virtual uint64_t getRawSlices(RawSlice* out, uint64_t out_size) const PURE; + virtual RawSliceVector + getRawSlices(absl::optional max_slices = absl::nullopt) const PURE; /** * @return uint64_t the total length of the buffer (not necessarily contiguous in memory). diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index d2ba0d58ac2e..1d7c58fd3b2a 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -33,6 +33,7 @@ #include #include +#include #define htole16(x) (x) #define htole32(x) (x) @@ -184,3 +185,23 @@ using os_fd_t = int; #define ENVOY_SHUT_RDWR SHUT_RDWR #endif + +// Note: chromium disabled recvmmsg regardless of ndk version. However, the only Android target +// currently actively using Envoy is Envoy Mobile, where recvmmsg is not actively disabled. In fact, +// defining mmsghdr here caused a conflicting definition with the ndk's definition of the struct +// (https://github.com/lyft/envoy-mobile/pull/772/checks?check_run_id=534152886#step:4:64). +// Therefore, we decided to remove the Android check introduced here in +// https://github.com/envoyproxy/envoy/pull/10120. If someone out there encounters problems with +// this please bring up in Envoy's slack channel #envoy-udp-quic-dev. +#if defined(__linux__) +#define ENVOY_MMSG_MORE 1 +#else +#define ENVOY_MMSG_MORE 0 +#define MSG_WAITFORONE 0x10000 // recvmmsg(): block until 1+ packets avail. +// Posix structure for describing messages sent by 'sendmmsg` and received by +// 'recvmmsg' +struct mmsghdr { + struct msghdr msg_hdr; + unsigned int msg_len; +}; +#endif diff --git a/include/envoy/common/pure.h b/include/envoy/common/pure.h index ecc74586618b..11c7d9a0700a 100644 --- a/include/envoy/common/pure.h +++ b/include/envoy/common/pure.h @@ -1,8 +1,8 @@ #pragma once -namespace Envoy { +// NOLINT(namespace-envoy) + /** * Friendly name for a pure virtual routine. 
*/ #define PURE = 0 -} // namespace Envoy diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index cd909d922ddb..50fca3a73007 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -53,6 +53,7 @@ envoy_cc_library( hdrs = ["subscription.h"], deps = [ "//include/envoy/stats:stats_macros", + "//source/common/config:api_type_oracle_lib", "//source/common/protobuf", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], @@ -75,13 +76,3 @@ envoy_cc_library( "//source/common/protobuf", ], ) - -envoy_cc_library( - name = "discovery_service_base_interface", - hdrs = ["discovery_service_base.h"], - deps = [ - ":subscription_interface", - "//source/common/config:api_type_oracle_lib", - "//source/common/protobuf", - ], -) diff --git a/include/envoy/config/discovery_service_base.h b/include/envoy/config/discovery_service_base.h deleted file mode 100644 index 52ae80e127c6..000000000000 --- a/include/envoy/config/discovery_service_base.h +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once - -#include - -#include "envoy/config/discovery_service_base.h" -#include "envoy/config/subscription.h" - -#include "common/config/api_type_oracle.h" - -namespace Envoy { -namespace Config { -template struct SubscriptionBase : public Config::SubscriptionCallbacks { - static std::string getResourceName(envoy::config::core::v3::ApiVersion resource_api_version) { - switch (resource_api_version) { - case envoy::config::core::v3::ApiVersion::AUTO: - case envoy::config::core::v3::ApiVersion::V2: - return ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name()) - .value(); - case envoy::config::core::v3::ApiVersion::V3: - return Current().GetDescriptor()->full_name(); - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - } -}; - -} // namespace Config -} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/config/typed_config.h b/include/envoy/config/typed_config.h index 574562ff6bc0..49cc2c1f3416 100644 --- 
a/include/envoy/config/typed_config.h +++ b/include/envoy/config/typed_config.h @@ -39,7 +39,7 @@ class UntypedFactory { */ class TypedFactory : public UntypedFactory { public: - virtual ~TypedFactory() = default; + ~TypedFactory() override = default; /** * @return ProtobufTypes::MessagePtr create empty config proto message for v2. The config, which diff --git a/include/envoy/config/typed_metadata.h b/include/envoy/config/typed_metadata.h index 680252e21dd3..a05ca8235d64 100644 --- a/include/envoy/config/typed_metadata.h +++ b/include/envoy/config/typed_metadata.h @@ -53,7 +53,7 @@ class TypedMetadata { */ class TypedMetadataFactory : public UntypedFactory { public: - virtual ~TypedMetadataFactory() = default; + ~TypedMetadataFactory() override = default; /** * Convert the google.protobuf.Struct into an instance of TypedMetadata::Object. diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 11e78c4f6895..4c22aff1c685 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -20,6 +20,7 @@ #include "envoy/network/transport_socket.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" +#include "envoy/stream_info/stream_info.h" #include "envoy/thread/thread.h" namespace Envoy { @@ -75,11 +76,13 @@ class Dispatcher { * @param socket supplies an open file descriptor and connection metadata to use for the * connection. Takes ownership of the socket. * @param transport_socket supplies a transport socket to be used by the connection. + * @param stream_info info object for the server connection * @return Network::ConnectionPtr a server connection that is owned by the caller. */ virtual Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, - Network::TransportSocketPtr&& transport_socket) PURE; + Network::TransportSocketPtr&& transport_socket, + StreamInfo::StreamInfo& stream_info) PURE; /** * Creates an instance of Envoy's Network::ClientConnection. 
Does NOT initiate the connection; diff --git a/include/envoy/filesystem/watcher.h b/include/envoy/filesystem/watcher.h index 411923fe31ad..23382690874c 100644 --- a/include/envoy/filesystem/watcher.h +++ b/include/envoy/filesystem/watcher.h @@ -30,6 +30,9 @@ class Watcher { /** * Add a file watch. * @param path supplies the path to watch. + * If path is a file, callback is called on events for the given file. + * If path is a directory (ends with "/"), callback is called on events + * for the given directory. * @param events supplies the events to watch. * @param cb supplies the callback to invoke when a change occurs. */ diff --git a/include/envoy/grpc/context.h b/include/envoy/grpc/context.h index f1837c604e9e..191a2583cb9e 100644 --- a/include/envoy/grpc/context.h +++ b/include/envoy/grpc/context.h @@ -19,19 +19,20 @@ class Context { enum class Protocol { Grpc, GrpcWeb }; - struct RequestNames; + struct RequestStatNames; /** * Parses out request grpc service-name and method from the path, returning a - * populated RequestNames if successful. See the implementation - * (source/common/grpc/common.h) for the definition of RequestNames. It is + * populated RequestStatNames if successful. See the implementation + * (source/common/grpc/common.h) for the definition of RequestStatNames. It is * hidden in the implementation since it references StatName, which is defined * only in the stats implementation. * * @param path the request path. * @return the request names, expressed as StatName. */ - virtual absl::optional resolveServiceAndMethod(const Http::HeaderEntry* path) PURE; + virtual absl::optional + resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) PURE; /** * Charge a success/failure stat to a cluster/service/method. @@ -41,7 +42,7 @@ class Context { * @param grpc_status supplies the gRPC status. 
*/ virtual void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, - const RequestNames& request_names, + const absl::optional& request_names, const Http::HeaderEntry* grpc_status) PURE; /** @@ -52,7 +53,7 @@ class Context { * @param success supplies whether the call succeeded. */ virtual void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, - const RequestNames& request_names, bool success) PURE; + const absl::optional& request_names, bool success) PURE; /** * Charge a success/failure stat to a cluster/service/method. @@ -60,8 +61,8 @@ class Context { * @param request_names supplies the request names. * @param success supplies whether the call succeeded. */ - virtual void chargeStat(const Upstream::ClusterInfo& cluster, const RequestNames& request_names, - bool success) PURE; + virtual void chargeStat(const Upstream::ClusterInfo& cluster, + const absl::optional& request_names, bool success) PURE; /** * Charge a request message stat to a cluster/service/method. @@ -70,7 +71,8 @@ class Context { * @param amount supplies the number of the request messages. */ virtual void chargeRequestMessageStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, uint64_t amount) PURE; + const absl::optional& request_names, + uint64_t amount) PURE; /** * Charge a response message stat to a cluster/service/method. @@ -79,7 +81,8 @@ class Context { * @param amount supplies the number of the response messages. */ virtual void chargeResponseMessageStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, uint64_t amount) PURE; + const absl::optional& request_names, + uint64_t amount) PURE; /** * @return a struct containing StatNames for gRPC stat tokens. 
diff --git a/include/envoy/grpc/google_grpc_creds.h b/include/envoy/grpc/google_grpc_creds.h index 831cd78010b3..32dd71efdd75 100644 --- a/include/envoy/grpc/google_grpc_creds.h +++ b/include/envoy/grpc/google_grpc_creds.h @@ -17,7 +17,7 @@ namespace Grpc { */ class GoogleGrpcCredentialsFactory : public Config::UntypedFactory { public: - virtual ~GoogleGrpcCredentialsFactory() = default; + ~GoogleGrpcCredentialsFactory() override = default; /** * Get a ChannelCredentials to be used for authentication of a gRPC channel. diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index 27f8507e2d00..2c67a90ace3b 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -126,3 +126,11 @@ envoy_cc_library( name = "metadata_interface", hdrs = ["metadata_interface.h"], ) + +envoy_cc_library( + name = "request_id_extension_interface", + hdrs = ["request_id_extension.h"], + deps = [ + ":header_map_interface", + ], +) diff --git a/include/envoy/http/async_client.h b/include/envoy/http/async_client.h index a38555acb7ff..9e95df1cc2f7 100644 --- a/include/envoy/http/async_client.h +++ b/include/envoy/http/async_client.h @@ -20,6 +20,19 @@ namespace Http { */ class AsyncClient { public: + /** + * An in-flight HTTP request. + */ + class Request { + public: + virtual ~Request() = default; + + /** + * Signals that the request should be cancelled. + */ + virtual void cancel() PURE; + }; + /** * Async Client failure reasons. */ @@ -30,6 +43,9 @@ class AsyncClient { /** * Notifies caller of async HTTP request status. + * + * To support a use case where a caller makes multiple requests in parallel, + * individual callback methods provide request context corresponding to that response. */ class Callbacks { public: @@ -37,14 +53,23 @@ class AsyncClient { /** * Called when the async HTTP request succeeds. + * @param request request handle. + * NOTE: request handle is passed for correlation purposes only, e.g. 
+ * for client code to be able to exclude that handle from a list of + * requests in progress. * @param response the HTTP response */ - virtual void onSuccess(ResponseMessagePtr&& response) PURE; + virtual void onSuccess(const Request& request, ResponseMessagePtr&& response) PURE; /** * Called when the async HTTP request fails. + * @param request request handle. + * NOTE: request handle is passed for correlation purposes only, e.g. + * for client code to be able to exclude that handle from a list of + * requests in progress. + * @param reason failure reason */ - virtual void onFailure(FailureReason reason) PURE; + virtual void onFailure(const Request& request, FailureReason reason) PURE; }; /** @@ -92,19 +117,6 @@ class AsyncClient { virtual void onReset() PURE; }; - /** - * An in-flight HTTP request. - */ - class Request { - public: - virtual ~Request() = default; - - /** - * Signals that the request should be cancelled. - */ - virtual void cancel() PURE; - }; - /** * An in-flight HTTP stream. */ diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 4e08534741b8..39e11b195449 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -26,6 +26,23 @@ const char MaxResponseHeadersCountOverrideKey[] = class Stream; +/** + * Stream encoder options specific to HTTP/1. + */ +class Http1StreamEncoderOptions { +public: + virtual ~Http1StreamEncoderOptions() = default; + + /** + * Force disable chunk encoding, even if there is no known content length. This effectively forces + * HTTP/1.0 behavior in which the connection will need to be closed to indicate end of stream. + */ + virtual void disableChunkEncoding() PURE; +}; + +using Http1StreamEncoderOptionsOptRef = + absl::optional>; + /** * Encodes an HTTP stream. This interface contains methods common to both the request and response * path. @@ -53,6 +70,12 @@ class StreamEncoder { * @param metadata_map_vector is the vector of metadata maps to encode. 
*/ virtual void encodeMetadata(const MetadataMapVector& metadata_map_vector) PURE; + + /** + * Return the HTTP/1 stream encoder options if applicable. If the stream is not HTTP/1 returns + * absl::nullopt. + */ + virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE; }; /** diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index 53a10d8ebd45..c97589d3cb69 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -676,6 +676,12 @@ class StreamEncoderFilterCallbacks : public virtual StreamFilterCallbacks { * @return the buffer limit the filter should apply. */ virtual uint32_t encoderBufferLimit() PURE; + + /** + * Return the HTTP/1 stream encoder options if applicable. If the stream is not HTTP/1 returns + * absl::nullopt. + */ + virtual Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE; }; /** diff --git a/include/envoy/http/hash_policy.h b/include/envoy/http/hash_policy.h index bab03d57c2f6..2a031247d93d 100644 --- a/include/envoy/http/hash_policy.h +++ b/include/envoy/http/hash_policy.h @@ -12,7 +12,7 @@ namespace Http { class Hashable { public: virtual absl::optional hash() const PURE; - virtual ~Hashable() {} + virtual ~Hashable() = default; }; /** diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 8dcc22a6be09..0c8ddb6adcfd 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -346,7 +346,6 @@ class HeaderEntry { HEADER_FUNC(EnvoyAttemptCount) \ HEADER_FUNC(EnvoyDecoratorOperation) \ HEADER_FUNC(KeepAlive) \ - HEADER_FUNC(NoChunks) \ HEADER_FUNC(ProxyConnection) \ HEADER_FUNC(RequestId) \ HEADER_FUNC(TransferEncoding) \ diff --git a/include/envoy/http/request_id_extension.h b/include/envoy/http/request_id_extension.h new file mode 100644 index 000000000000..953c86e99ef2 --- /dev/null +++ b/include/envoy/http/request_id_extension.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" 
+#include "envoy/http/header_map.h" + +namespace Envoy { +namespace Http { + +enum class TraceStatus { NoTrace, Sampled, Client, Forced }; + +/** + * Abstract request id utilities for getting/setting the request IDs and tracing status of requests + */ +class RequestIDExtension { +public: + virtual ~RequestIDExtension() = default; + + /** + * Directly set a request ID into the provided request headers. Override any previous request ID + * if any. + * @param request_headers supplies the incoming request headers for setting a request ID. + * @param force specifies if a new request ID should be forcefully set if one is already present. + */ + virtual void set(Http::RequestHeaderMap& request_headers, bool force) PURE; + + /** + * Preserve request ID in response headers if any is set in the request headers. + * @param response_headers supplies the downstream response headers for setting the request ID. + * @param request_headers supplies the incoming request headers for retrieving the request ID. + */ + virtual void setInResponse(Http::ResponseHeaderMap& response_headers, + const Http::RequestHeaderMap& request_headers) PURE; + + /** + * Perform a mod operation across the request id within a request and store the result in the + * provided output variable. This is used to perform sampling and validate the request ID. + * @param request_headers supplies the incoming request headers for retrieving the request ID. + * @param out reference to a variable where we store the result of the mod operation. + * @param mod integer to mod the request ID by. + * @return true if request ID is valid and out is populated by the result. + */ + virtual bool modBy(const Http::RequestHeaderMap& request_headers, uint64_t& out, + uint64_t mod) PURE; + + /** + * Get the current tracing status of a request given its headers. + * @param request_headers supplies the incoming request headers for retrieving the request ID. + * @return trace status of the request based on the given headers. 
+ */ + virtual TraceStatus getTraceStatus(const Http::RequestHeaderMap& request_headers) PURE; + + /** + * Set the tracing status of a request. + * @param request_headers supplies the incoming request headers for setting the trace status. + * @param status the trace status that should be set for this request. + */ + virtual void setTraceStatus(Http::RequestHeaderMap& request_headers, TraceStatus status) PURE; +}; + +using RequestIDExtensionSharedPtr = std::shared_ptr; + +} // namespace Http +} // namespace Envoy diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index afd11f05d5d6..229ad3019523 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -117,6 +117,7 @@ envoy_cc_library( ":connection_balancer_interface", ":connection_interface", ":listen_socket_interface", + "//include/envoy/access_log:access_log_interface", "//include/envoy/stats:stats_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/include/envoy/network/connection_handler.h b/include/envoy/network/connection_handler.h index 9687cadbd3eb..eefb69e7dd96 100644 --- a/include/envoy/network/connection_handler.h +++ b/include/envoy/network/connection_handler.h @@ -97,9 +97,19 @@ class ConnectionHandler { virtual Listener* listener() PURE; /** - * Destroy the actual Listener it wraps. + * Temporarily stop listening according to implementation's own definition. */ - virtual void destroy() PURE; + virtual void pauseListening() PURE; + + /** + * Resume listening according to implementation's own definition. + */ + virtual void resumeListening() PURE; + + /** + * Stop listening according to implementation's own definition. 
+ */ + virtual void shutdownListener() PURE; }; using ActiveListenerPtr = std::unique_ptr; diff --git a/include/envoy/network/filter.h b/include/envoy/network/filter.h index 42cd0a945779..f929f0472afd 100644 --- a/include/envoy/network/filter.h +++ b/include/envoy/network/filter.h @@ -271,6 +271,17 @@ class ListenerFilterCallbacks { virtual void continueFilterChain(bool success) PURE; }; +/** + * Interface for a listener filter matching with incoming traffic. + */ +class ListenerFilterMatcher { +public: + virtual ~ListenerFilterMatcher() = default; + virtual bool matches(Network::ListenerFilterCallbacks& cb) const PURE; +}; +using ListenerFilterMatcherPtr = std::unique_ptr; +using ListenerFilterMatcherSharedPtr = std::shared_ptr; + /** * Listener Filter */ @@ -299,9 +310,11 @@ class ListenerFilterManager { /** * Add a filter to the listener. Filters are invoked in FIFO order (the filter added * first is called first). + * @param listener_filter_matcher supplies the matcher to decide when filter is enabled. * @param filter supplies the filter being added. */ - virtual void addAcceptFilter(ListenerFilterPtr&& filter) PURE; + virtual void addAcceptFilter(const ListenerFilterMatcherSharedPtr& listener_filter_matcher, + ListenerFilterPtr&& filter) PURE; }; /** @@ -335,6 +348,14 @@ class FilterChain { using FilterChainSharedPtr = std::shared_ptr; +/** + * A filter chain that can be drained. + */ +class DrainableFilterChain : public FilterChain { +public: + virtual void startDraining() PURE; +}; + /** * Interface for searching through configured filter chains. 
*/ diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index 6965f6a14630..132912218c52 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -1,18 +1,26 @@ #pragma once +#include + #include "envoy/api/io_error.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" +#include "absl/container/fixed_array.h" + namespace Envoy { namespace Buffer { struct RawSlice; } // namespace Buffer +using RawSliceArrays = absl::FixedArray>; + namespace Network { namespace Address { class Instance; class Ip; + +using InstanceConstSharedPtr = std::shared_ptr; } // namespace Address /** @@ -74,21 +82,37 @@ class IoHandle { int flags, const Address::Ip* self_ip, const Address::Instance& peer_address) PURE; + struct RecvMsgPerPacketInfo { + // The destination address from transport header. + Address::InstanceConstSharedPtr local_address_; + // The the source address from transport header. + Address::InstanceConstSharedPtr peer_address_; + // The payload length of this packet. + unsigned int msg_len_{0}; + }; + + /** + * The output parameter type for recvmsg and recvmmsg. + */ struct RecvMsgOutput { /* + * @param num_packets_per_call is the max number of packets allowed per + * recvmmsg call. For recvmsg call, any value larger than 0 is allowed, but + * only one packet will be returned. * @param dropped_packets points to a variable to store how many packets are * dropped so far. If nullptr, recvmsg() won't try to get this information * from transport header. */ - RecvMsgOutput(uint32_t* dropped_packets) : dropped_packets_(dropped_packets) {} + RecvMsgOutput(size_t num_packets_per_call, uint32_t* dropped_packets) + : dropped_packets_(dropped_packets), msg_(num_packets_per_call) {} // If not nullptr, its value is the total number of packets dropped. recvmsg() will update it // when more packets are dropped. uint32_t* dropped_packets_; - // The destination address from transport header. 
- std::shared_ptr local_address_; - // The the source address from transport header. - std::shared_ptr peer_address_; + + // Packet headers for each received packet. It's populated according to packet receive order. + // Only the first entry is used to return per packet information by recvmsg. + absl::FixedArray msg_; }; /** @@ -104,6 +128,22 @@ class IoHandle { */ virtual Api::IoCallUint64Result recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice, uint32_t self_port, RecvMsgOutput& output) PURE; + + /** + * If the platform supports, receive multiple messages into given slices, output overflow, + * source/destination addresses per message via passed-in parameters upon success. + * @param slices are the receive buffers for the messages. Each message + * received are stored in an individual entry of |slices|. + * @param self_port is the same as the one in recvmsg(). + * @param output is modified upon each call and each message received. + */ + virtual Api::IoCallUint64Result recvmmsg(RawSliceArrays& slices, uint32_t self_port, + RecvMsgOutput& output) PURE; + + /** + * return true if the platform supports recvmmsg() and sendmmsg(). + */ + virtual bool supportsMmsg() const PURE; }; using IoHandlePtr = std::unique_ptr; diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index f5a85b56be81..2f511eb99a77 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -4,6 +4,7 @@ #include #include +#include "envoy/access_log/access_log.h" #include "envoy/api/io_error.h" #include "envoy/common/exception.h" #include "envoy/config/core/v3/base.pb.h" @@ -142,6 +143,11 @@ class ListenerConfig { * though the implementation may be a NOP balancer. */ virtual ConnectionBalancer& connectionBalancer() PURE; + + /** + * @return std::vector access logs emitted by the listener. 
+ */ + virtual const std::vector& accessLogs() const PURE; }; /** diff --git a/include/envoy/network/resolver.h b/include/envoy/network/resolver.h index 457f65700c7e..dcdcfe8a8c92 100644 --- a/include/envoy/network/resolver.h +++ b/include/envoy/network/resolver.h @@ -19,7 +19,7 @@ namespace Address { */ class Resolver : public Config::UntypedFactory { public: - virtual ~Resolver() = default; + ~Resolver() override = default; /** * Resolve a custom address string and port to an Address::Instance. diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h index 78942de6618d..30be99b13c2a 100644 --- a/include/envoy/registry/registry.h +++ b/include/envoy/registry/registry.h @@ -372,40 +372,89 @@ template class FactoryRegistry : public Logger::Loggable a function that will restore the previously registered factories + * (by name or type). */ - static Base* replaceFactoryForTest(Base& factory) { + static std::function replaceFactoryForTest(Base& factory) { + // The by-type map is lazily initialized. Create it before any modifications + // are made to the by-name map. 
+ factoriesByType(); + + Base* prev_by_name = nullptr; auto it = factories().find(factory.name()); - Base* displaced = nullptr; if (it != factories().end()) { - displaced = it->second; + prev_by_name = it->second; factories().erase(it); - } - factories().emplace(factory.name(), &factory); - RELEASE_ASSERT(getFactory(factory.name()) == &factory, ""); + factoriesByType().erase(prev_by_name->configType()); - auto config_type = factory.configType(); - Base* prev = getFactoryByType(config_type); - if (prev != nullptr) { - factoriesByType().emplace(config_type, &factory); + ENVOY_LOG_MISC( + info, "Factory '{}' (type '{}') displaced-by-name with test factory '{}' (type '{}')", + prev_by_name->name(), prev_by_name->configType(), factory.name(), factory.configType()); } - return displaced; - } + // Ignore empty config types and ignore test-registered factories that are using the Struct + // type. + // TODO(zuercher): convert static factory registrations in tests to use InjectFactory and + // remove the struct check. + bool valid_config_type = + !factory.configType().empty() && factory.configType() != "google.protobuf.Struct"; + + Base* prev_by_type = nullptr; + if (valid_config_type) { + // It's possible the that no factory was replaced by-name, but that the replacement factory + // is displacing a factory by type. Completely remove the factory by type. + auto type_it = factoriesByType().find(factory.configType()); + if (type_it != factoriesByType().end()) { + prev_by_type = type_it->second; + ASSERT(prev_by_type != nullptr); + + factoriesByType().erase(type_it); + + factories().erase(prev_by_type->name()); + + ENVOY_LOG_MISC( + info, "Factory '{}' (type '{}') displaced-by-type with test factory '{}' (type '{}')", + prev_by_type->name(), prev_by_type->configType(), factory.name(), factory.configType()); + } + } - /** - * Remove a factory by name. This method should only be used for testing purposes. - * @param name is the name of the factory to remove. 
- */ - static void removeFactoryForTest(absl::string_view name, absl::string_view config_type) { - auto result = factories().erase(name); - RELEASE_ASSERT(result == 1, ""); + Base* replacement = &factory; - Base* prev = getFactoryByType(config_type); - if (prev != nullptr) { - factoriesByType().erase(config_type); + factories().emplace(factory.name(), replacement); + RELEASE_ASSERT(getFactory(factory.name()) == replacement, ""); + + if (valid_config_type) { + factoriesByType().emplace(factory.configType(), replacement); + RELEASE_ASSERT(getFactoryByType(factory.configType()) == replacement, ""); } + + return [replacement, prev_by_name, prev_by_type, valid_config_type]() { + factories().erase(replacement->name()); + if (valid_config_type) { + factoriesByType().erase(replacement->configType()); + } + + if (prev_by_name) { + factories().emplace(prev_by_name->name(), prev_by_name); + if (!prev_by_name->configType().empty()) { + factoriesByType().emplace(prev_by_name->configType(), prev_by_name); + } + + ENVOY_LOG_MISC(warn, "Restored factory '{}' (type '{}'), formerly displaced-by-name", + prev_by_name->name(), prev_by_name->configType()); + } + + if (prev_by_type) { + factories().emplace(prev_by_type->name(), prev_by_type); + if (!prev_by_type->configType().empty()) { + factoriesByType().emplace(prev_by_type->configType(), prev_by_type); + } + + ENVOY_LOG_MISC(warn, "Restored factory '{}' (type '{}'), formerly displaced-by-type", + prev_by_type->name(), prev_by_type->configType()); + } + }; } }; diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index d3b9c3c50089..13032173d929 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -370,6 +370,24 @@ class ShadowPolicy { using ShadowPolicyPtr = std::unique_ptr; +/** + * All virtual cluster stats. 
@see stats_macro.h + */ +#define ALL_VIRTUAL_CLUSTER_STATS(COUNTER) \ + COUNTER(upstream_rq_retry) \ + COUNTER(upstream_rq_retry_limit_exceeded) \ + COUNTER(upstream_rq_retry_overflow) \ + COUNTER(upstream_rq_retry_success) \ + COUNTER(upstream_rq_timeout) \ + COUNTER(upstream_rq_total) + +/** + * Struct definition for all virtual cluster stats. @see stats_macro.h + */ +struct VirtualClusterStats { + ALL_VIRTUAL_CLUSTER_STATS(GENERATE_COUNTER_STRUCT) +}; + /** * Virtual cluster definition (allows splitting a virtual host into virtual clusters orthogonal to * routes for stat tracking and priority purposes). @@ -382,6 +400,15 @@ class VirtualCluster { * @return the stat-name of the virtual cluster. */ virtual Stats::StatName statName() const PURE; + + /** + * @return VirtualClusterStats& strongly named stats for this virtual cluster. + */ + virtual VirtualClusterStats& stats() const PURE; + + static VirtualClusterStats generateStats(Stats::Scope& scope) { + return {ALL_VIRTUAL_CLUSTER_STATS(POOL_COUNTER(scope))}; + } }; class RateLimitPolicy; diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index caaced47a0dd..69fb92391c03 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -158,6 +158,7 @@ envoy_cc_library( hdrs = ["filter_config.h"], deps = [ ":admin_interface", + ":drain_manager_interface", ":lifecycle_notifier_interface", ":process_context_interface", "//include/envoy/access_log:access_log_interface", @@ -244,6 +245,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "request_id_extension_config_interface", + hdrs = ["request_id_extension_config.h"], + deps = [ + ":filter_config_interface", + "//include/envoy/http:request_id_extension_interface", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "resource_monitor_config_interface", hdrs = ["resource_monitor_config.h"], diff --git a/include/envoy/server/access_log_config.h b/include/envoy/server/access_log_config.h index 2fc07ac8d6e1..158c442acc49 
100644 --- a/include/envoy/server/access_log_config.h +++ b/include/envoy/server/access_log_config.h @@ -18,7 +18,7 @@ namespace Configuration { */ class AccessLogInstanceFactory : public Config::TypedFactory { public: - virtual ~AccessLogInstanceFactory() = default; + ~AccessLogInstanceFactory() override = default; /** * Create a particular AccessLog::Instance implementation from a config proto. If the diff --git a/include/envoy/server/active_udp_listener_config.h b/include/envoy/server/active_udp_listener_config.h index 52846679a517..2e027dc4d747 100644 --- a/include/envoy/server/active_udp_listener_config.h +++ b/include/envoy/server/active_udp_listener_config.h @@ -14,7 +14,7 @@ namespace Server { */ class ActiveUdpListenerConfigFactory : public Config::UntypedFactory { public: - virtual ~ActiveUdpListenerConfigFactory() = default; + ~ActiveUdpListenerConfigFactory() override = default; virtual ProtobufTypes::MessagePtr createEmptyConfigProto() PURE; diff --git a/include/envoy/server/admin.h b/include/envoy/server/admin.h index 5f0327ce8c8a..62b2604fda78 100644 --- a/include/envoy/server/admin.h +++ b/include/envoy/server/admin.h @@ -49,6 +49,12 @@ class AdminStream { * request. */ virtual const Http::RequestHeaderMap& getRequestHeaders() const PURE; + + /** + * Return the HTTP/1 stream encoder options if applicable. If the stream is not HTTP/1 returns + * absl::nullopt. 
+ */ + virtual Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() PURE; }; /** diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index ff8ccbb6eb4a..75d8dd24d371 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -14,6 +14,7 @@ #include "envoy/network/filter.h" #include "envoy/runtime/runtime.h" #include "envoy/server/admin.h" +#include "envoy/server/drain_manager.h" #include "envoy/server/lifecycle_notifier.h" #include "envoy/server/overload_manager.h" #include "envoy/server/process_context.h" @@ -116,6 +117,11 @@ class ServerFactoryContext : public virtual CommonFactoryContext { * @return the server-wide grpc context. */ virtual Grpc::Context& grpcContext() PURE; + + /** + * @return DrainManager& the server-wide drain manager. + */ + virtual Envoy::Server::DrainManager& drainManager() PURE; }; /** @@ -218,7 +224,15 @@ class FactoryContext : public virtual CommonFactoryContext { * The life time is no longer than the owning listener. It should be used to create * NetworkFilterChain. */ -class FilterChainFactoryContext : public virtual FactoryContext {}; +class FilterChainFactoryContext : public virtual FactoryContext { +public: + /** + * Set the flag that all attached filter chains will be destroyed. + */ + virtual void startDraining() PURE; +}; + +using FilterChainFactoryContextPtr = std::unique_ptr; /** * An implementation of FactoryContext. 
The life time should cover the lifetime of the filter chains @@ -237,7 +251,7 @@ class ListenerFactoryContext : public virtual FactoryContext { */ class ListenerFilterConfigFactoryBase : public Config::TypedFactory { public: - virtual ~ListenerFilterConfigFactoryBase() = default; + ~ListenerFilterConfigFactoryBase() override = default; }; /** @@ -253,13 +267,15 @@ class NamedListenerFilterConfigFactory : public ListenerFilterConfigFactoryBase * produce a factory with the provided parameters, it should throw an EnvoyException in the case * of general error or a Json::Exception if the json configuration is erroneous. The returned * callback should always be initialized. - * @param config supplies the general protobuf configuration for the filter + * @param config supplies the general protobuf configuration for the filter. + * @param listener_filter_matcher supplies the matcher to decide when filter is enabled. * @param context supplies the filter's context. * @return Network::ListenerFilterFactoryCb the factory creation function. */ - virtual Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message& config, - ListenerFactoryContext& context) PURE; + virtual Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( + const Protobuf::Message& config, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + ListenerFactoryContext& context) PURE; std::string category() const override { return "envoy.filters.listener"; } }; @@ -293,7 +309,7 @@ class NamedUdpListenerFilterConfigFactory : public ListenerFilterConfigFactoryBa */ class ProtocolOptionsFactory : public Config::TypedFactory { public: - virtual ~ProtocolOptionsFactory() = default; + ~ProtocolOptionsFactory() override = default; /** * Create a particular filter's protocol specific options implementation. 
If the factory diff --git a/include/envoy/server/health_checker_config.h b/include/envoy/server/health_checker_config.h index 28ed6fe27de6..5994e37f231b 100644 --- a/include/envoy/server/health_checker_config.h +++ b/include/envoy/server/health_checker_config.h @@ -58,7 +58,7 @@ class HealthCheckerFactoryContext { */ class CustomHealthCheckerFactory : public Config::TypedFactory { public: - virtual ~CustomHealthCheckerFactory() = default; + ~CustomHealthCheckerFactory() override = default; /** * Creates a particular custom health checker factory implementation. diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index a8ab3b365ebb..83996f09db05 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -75,9 +75,9 @@ class Instance { virtual void drainListeners() PURE; /** - * @return const DrainManager& singleton for use by the entire server. + * @return DrainManager& singleton for use by the entire server. */ - virtual const DrainManager& drainManager() PURE; + virtual DrainManager& drainManager() PURE; /** * @return AccessLogManager for use by the entire server. diff --git a/include/envoy/server/request_id_extension_config.h b/include/envoy/server/request_id_extension_config.h new file mode 100644 index 000000000000..3a52214fcc54 --- /dev/null +++ b/include/envoy/server/request_id_extension_config.h @@ -0,0 +1,35 @@ +#pragma once + +#include + +#include "envoy/http/request_id_extension.h" +#include "envoy/server/filter_config.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +/** + * Implemented for each RequestIDExtension and registered via Registry::registerFactory + * or the convenience class RegisterFactory. + */ +class RequestIDExtensionFactory : public Envoy::Config::TypedFactory { +public: + ~RequestIDExtensionFactory() override = default; + + /** + * Create a Request ID Extension instance from the provided config proto. 
+ * @param config the custom configuration for this request id extension type. + * @param context general filter context through which persistent resources can be accessed. + */ + virtual Http::RequestIDExtensionSharedPtr createExtensionInstance(const Protobuf::Message& config, + FactoryContext& context) PURE; + + std::string category() const override { return "envoy.request_id_extension"; } +}; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/include/envoy/server/resource_monitor_config.h b/include/envoy/server/resource_monitor_config.h index 2b7021a467d3..245e9323d6b7 100644 --- a/include/envoy/server/resource_monitor_config.h +++ b/include/envoy/server/resource_monitor_config.h @@ -41,7 +41,7 @@ class ResourceMonitorFactoryContext { */ class ResourceMonitorFactory : public Config::TypedFactory { public: - virtual ~ResourceMonitorFactory() = default; + ~ResourceMonitorFactory() override = default; /** * Create a particular resource monitor implementation. diff --git a/include/envoy/server/tracer_config.h b/include/envoy/server/tracer_config.h index 2eb4f2ce2eba..25f8b664d949 100644 --- a/include/envoy/server/tracer_config.h +++ b/include/envoy/server/tracer_config.h @@ -38,18 +38,25 @@ using TracerFactoryContextPtr = std::unique_ptr; */ class TracerFactory : public Config::TypedFactory { public: - virtual ~TracerFactory() = default; + ~TracerFactory() override = default; /** * Create a particular HttpTracer implementation. If the implementation is unable to produce an * HttpTracer with the provided parameters, it should throw an EnvoyException in the case of * general error or a Json::Exception if the json configuration is erroneous. The returned * pointer should always be valid. + * + * NOTE: Due to the corner case of OpenCensus, who can only support a single tracing + * configuration per entire process, the returned HttpTracer instance is not guaranteed + * to be unique. 
+ * That is why the return type has been changed to std::shared_ptr<> instead of a more + * idiomatic std::unique_ptr<>. + * * @param config supplies the proto configuration for the HttpTracer * @param context supplies the factory context */ - virtual Tracing::HttpTracerPtr createHttpTracer(const Protobuf::Message& config, - TracerFactoryContext& context) PURE; + virtual Tracing::HttpTracerSharedPtr createHttpTracer(const Protobuf::Message& config, + TracerFactoryContext& context) PURE; std::string category() const override { return "envoy.tracers"; } }; diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index 1dd0e4ab7b9b..a3dd4d5dac6d 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -103,7 +103,7 @@ class TransportSocketFactoryContext { class TransportSocketConfigFactory : public Config::TypedFactory { public: - virtual ~TransportSocketConfigFactory() = default; + ~TransportSocketConfigFactory() override = default; }; /** diff --git a/include/envoy/server/wasm_config.h b/include/envoy/server/wasm_config.h index d63ebc004d3c..8402920ebe20 100644 --- a/include/envoy/server/wasm_config.h +++ b/include/envoy/server/wasm_config.h @@ -47,7 +47,7 @@ class WasmFactory { * @throw EnvoyException if the implementation is unable to produce an instance with * the provided parameters. 
*/ - virtual void createWasm(const envoy::extensions::wasm::v3::WasmService& config, + virtual void createWasm(const envoy::config::wasm::v3::WasmService& config, WasmFactoryContext& context, CreateWasmServiceCallback&& cb) PURE; }; diff --git a/include/envoy/singleton/manager.h b/include/envoy/singleton/manager.h index 212c656ecd7e..995c333f8744 100644 --- a/include/envoy/singleton/manager.h +++ b/include/envoy/singleton/manager.h @@ -17,7 +17,7 @@ namespace Singleton { */ class Registration : public Config::UntypedFactory { public: - virtual ~Registration() = default; + ~Registration() override = default; std::string category() const override { return "envoy.singleton"; } }; diff --git a/include/envoy/ssl/context_config.h b/include/envoy/ssl/context_config.h index 124ef7730750..9196a5a294a9 100644 --- a/include/envoy/ssl/context_config.h +++ b/include/envoy/ssl/context_config.h @@ -129,6 +129,11 @@ class ServerContextConfig : public virtual ContextConfig { * Session timeout is used to specify lifetime hint of tls tickets. */ virtual absl::optional sessionTimeout() const PURE; + + /** + * @return True if stateless TLS session resumption is disabled, false otherwise. + */ + virtual bool disableStatelessSessionResumption() const PURE; }; using ServerContextConfigPtr = std::unique_ptr; diff --git a/include/envoy/ssl/context_manager.h b/include/envoy/ssl/context_manager.h index b30ef3a0edb3..77f6ead17c8c 100644 --- a/include/envoy/ssl/context_manager.h +++ b/include/envoy/ssl/context_manager.h @@ -53,7 +53,7 @@ using ContextManagerPtr = std::unique_ptr; class ContextManagerFactory : public Config::UntypedFactory { public: - virtual ~ContextManagerFactory() = default; + ~ContextManagerFactory() override = default; virtual ContextManagerPtr createContextManager(TimeSource& time_source) PURE; // There could be only one factory thus the name is static. 
diff --git a/include/envoy/ssl/private_key/private_key_config.h b/include/envoy/ssl/private_key/private_key_config.h index 415584df4d42..6a563e38b10f 100644 --- a/include/envoy/ssl/private_key/private_key_config.h +++ b/include/envoy/ssl/private_key/private_key_config.h @@ -12,7 +12,7 @@ namespace Ssl { class PrivateKeyMethodProviderInstanceFactory : public Config::UntypedFactory { public: - virtual ~PrivateKeyMethodProviderInstanceFactory() = default; + ~PrivateKeyMethodProviderInstanceFactory() override = default; /** * Create a particular PrivateKeyMethodProvider implementation. If the implementation is diff --git a/include/envoy/stats/scope.h b/include/envoy/stats/scope.h index 804fb0441212..03a232063c13 100644 --- a/include/envoy/stats/scope.h +++ b/include/envoy/stats/scope.h @@ -70,7 +70,7 @@ class Scope { * @param name The name, expressed as a string. * @return a counter within the scope's namespace. */ - virtual Counter& counter(const std::string& name) PURE; + virtual Counter& counterFromString(const std::string& name) PURE; /** * Creates a Gauge from the stat name. Tag extraction will be performed on the name. @@ -99,7 +99,7 @@ class Scope { * @param import_mode Whether hot-restart should accumulate this value. * @return a gauge within the scope's namespace. */ - virtual Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) PURE; + virtual Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) PURE; /** * @return a null gauge within the scope's namespace. @@ -134,7 +134,7 @@ class Scope { * @param unit The unit of measurement. * @return a histogram within the scope's namespace with a particular value type. */ - virtual Histogram& histogram(const std::string& name, Histogram::Unit unit) PURE; + virtual Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) PURE; /** * @param The name of the stat, obtained from the SymbolTable. 
diff --git a/include/envoy/stats/stats_macros.h b/include/envoy/stats/stats_macros.h index 1cd3212e89f9..77d2243903f4 100644 --- a/include/envoy/stats/stats_macros.h +++ b/include/envoy/stats/stats_macros.h @@ -54,9 +54,9 @@ static inline std::string statPrefixJoin(absl::string_view prefix, absl::string_ return absl::StrCat(prefix, ".", token); } -#define POOL_COUNTER_PREFIX(POOL, PREFIX) (POOL).counter(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_ -#define POOL_GAUGE_PREFIX(POOL, PREFIX) (POOL).gauge(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_MODE_ -#define POOL_HISTOGRAM_PREFIX(POOL, PREFIX) (POOL).histogram(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_UNIT_ +#define POOL_COUNTER_PREFIX(POOL, PREFIX) (POOL).counterFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_ +#define POOL_GAUGE_PREFIX(POOL, PREFIX) (POOL).gaugeFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_MODE_ +#define POOL_HISTOGRAM_PREFIX(POOL, PREFIX) (POOL).histogramFromString(Envoy::statPrefixJoin(PREFIX, FINISH_STAT_DECL_UNIT_ #define POOL_COUNTER(POOL) POOL_COUNTER_PREFIX(POOL, "") #define POOL_GAUGE(POOL) POOL_GAUGE_PREFIX(POOL, "") diff --git a/include/envoy/stream_info/BUILD b/include/envoy/stream_info/BUILD index fbaf756c785c..63fa4b47ba5a 100644 --- a/include/envoy/stream_info/BUILD +++ b/include/envoy/stream_info/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( "//include/envoy/common:time_interface", "//include/envoy/http:header_map_interface", "//include/envoy/http:protocol_interface", + "//include/envoy/http:request_id_extension_interface", "//include/envoy/ssl:connection_interface", "//include/envoy/upstream:host_description_interface", "//source/common/common:assert_lib", diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 37f80b67eebc..1afda3a2336a 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -9,6 +9,7 @@ #include "envoy/config/core/v3/base.pb.h" #include 
"envoy/http/header_map.h" #include "envoy/http/protocol.h" +#include "envoy/http/request_id_extension.h" #include "envoy/ssl/connection.h" #include "envoy/stream_info/filter_state.h" #include "envoy/upstream/host_description.h" @@ -142,6 +143,8 @@ struct ResponseCodeDetailValues { // indicates that original "success" headers may have been sent downstream // despite the subsequent failure. const std::string LateUpstreamReset = "upstream_reset_after_response_started"; + // The connection is rejected due to no matching filter chain. + const std::string FilterChainNotFound = "filter_chain_not_found"; }; using ResponseCodeDetails = ConstSingleton; @@ -538,6 +541,16 @@ class StreamInfo { * no route or cluster does not exist(nullptr), and set to a valid cluster(not nullptr). */ virtual absl::optional upstreamClusterInfo() const PURE; + + /** + * @param utils The requestID utils implementation this stream uses + */ + virtual void setRequestIDExtension(Http::RequestIDExtensionSharedPtr utils) PURE; + + /** + * @return A shared pointer to the request ID utils for this stream + */ + virtual Http::RequestIDExtensionSharedPtr getRequestIDExtension() const PURE; }; } // namespace StreamInfo diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h index 43eba8bddae7..0d16c9afbfe1 100644 --- a/include/envoy/tcp/conn_pool.h +++ b/include/envoy/tcp/conn_pool.h @@ -203,6 +203,11 @@ class Instance : public Event::DeferredDeletable { * should be done by resetting the connection. */ virtual Cancellable* newConnection(Callbacks& callbacks) PURE; + + /** + * @return the description of the host this connection pool is for. 
+ */ + virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; using InstancePtr = std::unique_ptr; diff --git a/include/envoy/tracing/http_tracer.h b/include/envoy/tracing/http_tracer.h index ca9e02d32bcc..63da639e84ee 100644 --- a/include/envoy/tracing/http_tracer.h +++ b/include/envoy/tracing/http_tracer.h @@ -190,9 +190,6 @@ class HttpTracer { const Tracing::Decision tracing_decision) PURE; }; -// HttpTracerPtr is intended for use by Server::Configuration::TracerFactory implementations. -using HttpTracerPtr = std::unique_ptr; -// HttpTracerSharedPtr should be used wherever an HttpTracer instance is necessary. using HttpTracerSharedPtr = std::shared_ptr; } // namespace Tracing diff --git a/include/envoy/upstream/cluster_factory.h b/include/envoy/upstream/cluster_factory.h index 2ec16f1c9ff9..389a804ba044 100644 --- a/include/envoy/upstream/cluster_factory.h +++ b/include/envoy/upstream/cluster_factory.h @@ -126,7 +126,7 @@ class ClusterFactoryContext { */ class ClusterFactory : public Config::UntypedFactory { public: - virtual ~ClusterFactory() = default; + ~ClusterFactory() override = default; /** * Create a new instance of cluster. If the implementation is unable to produce a cluster instance diff --git a/include/envoy/upstream/host_description.h b/include/envoy/upstream/host_description.h index be8de6d3c9a1..bd0714bae455 100644 --- a/include/envoy/upstream/host_description.h +++ b/include/envoy/upstream/host_description.h @@ -97,6 +97,11 @@ class HostDescription { */ virtual HealthCheckHostMonitor& healthChecker() const PURE; + /** + * @return The hostname used as the host header for health checking. + */ + virtual const std::string& hostnameForHealthChecks() const PURE; + /** * @return the hostname associated with the host if any. * Empty string "" indicates that hostname is not a DNS name. 
diff --git a/include/envoy/upstream/retry.h b/include/envoy/upstream/retry.h index 3e6ca2890a90..4a7af89201bd 100644 --- a/include/envoy/upstream/retry.h +++ b/include/envoy/upstream/retry.h @@ -78,7 +78,7 @@ using RetryHostPredicateSharedPtr = std::shared_ptr; */ class RetryPriorityFactory : public Config::TypedFactory { public: - virtual ~RetryPriorityFactory() = default; + ~RetryPriorityFactory() override = default; virtual RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message& config, @@ -93,7 +93,7 @@ class RetryPriorityFactory : public Config::TypedFactory { */ class RetryHostPredicateFactory : public Config::TypedFactory { public: - virtual ~RetryHostPredicateFactory() = default; + ~RetryHostPredicateFactory() override = default; virtual RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config, uint32_t retry_count) PURE; diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index d8a41bae7d3d..553f9b33fd8b 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -575,6 +575,7 @@ class PrioritySet { COUNTER(upstream_rq_pending_total) \ COUNTER(upstream_rq_per_try_timeout) \ COUNTER(upstream_rq_retry) \ + COUNTER(upstream_rq_retry_limit_exceeded) \ COUNTER(upstream_rq_retry_overflow) \ COUNTER(upstream_rq_retry_success) \ COUNTER(upstream_rq_rx_reset) \ diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD index 1122c55583f8..06d3a165fb92 100644 --- a/source/common/access_log/BUILD +++ b/source/common/access_log/BUILD @@ -29,7 +29,6 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", - "//source/common/runtime:uuid_util_lib", "//source/common/stream_info:stream_info_lib", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto", @@ -63,6 +62,7 @@ envoy_cc_library( "//include/envoy/access_log:access_log_interface", 
"//include/envoy/api:api_interface", "//source/common/buffer:buffer_lib", + "//source/common/common:logger_lib", "//source/common/common:thread_lib", ], ) diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index 7237fb061fc4..345041824b24 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -1,7 +1,6 @@ #include "common/access_log/access_log_formatter.h" -#include - +#include #include #include #include @@ -438,15 +437,23 @@ class StreamInfoAddressFieldExtractor : public StreamInfoFormatter::FieldExtract std::function; static std::unique_ptr withPort(FieldExtractor f) { - return std::make_unique(f, true); + return std::make_unique( + f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithPort); } static std::unique_ptr withoutPort(FieldExtractor f) { - return std::make_unique(f, false); + return std::make_unique( + f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithoutPort); + } + + static std::unique_ptr justPort(FieldExtractor f) { + return std::make_unique( + f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType::JustPort); } - StreamInfoAddressFieldExtractor(FieldExtractor f, bool include_port) - : field_extractor_(f), include_port_(include_port) {} + StreamInfoAddressFieldExtractor( + FieldExtractor f, StreamInfoFormatter::StreamInfoAddressFieldExtractionType extraction_type) + : field_extractor_(f), extraction_type_(extraction_type) {} // StreamInfoFormatter::FieldExtractor std::string extract(const StreamInfo::StreamInfo& stream_info) const override { @@ -468,15 +475,19 @@ class StreamInfoAddressFieldExtractor : public StreamInfoFormatter::FieldExtract private: std::string toString(const Network::Address::Instance& address) const { - if (include_port_) { + switch (extraction_type_) { + case StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithoutPort: + return 
StreamInfo::Utility::formatDownstreamAddressNoPort(address); + case StreamInfoFormatter::StreamInfoAddressFieldExtractionType::JustPort: + return StreamInfo::Utility::formatDownstreamAddressJustPort(address); + case StreamInfoFormatter::StreamInfoAddressFieldExtractionType::WithPort: + default: return address.asString(); } - - return StreamInfo::Utility::formatDownstreamAddressNoPort(address); } FieldExtractor field_extractor_; - const bool include_port_; + const StreamInfoFormatter::StreamInfoAddressFieldExtractionType extraction_type_; }; // Ssl::ConnectionInfo std::string field extractor. @@ -601,6 +612,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { [](const Envoy::StreamInfo::StreamInfo& stream_info) { return stream_info.downstreamLocalAddress(); }); + } else if (field_name == "DOWNSTREAM_LOCAL_PORT") { + field_extractor_ = StreamInfoAddressFieldExtractor::justPort( + [](const Envoy::StreamInfo::StreamInfo& stream_info) { + return stream_info.downstreamLocalAddress(); + }); } else if (field_name == "DOWNSTREAM_REMOTE_ADDRESS") { field_extractor_ = StreamInfoAddressFieldExtractor::withPort([](const StreamInfo::StreamInfo& stream_info) { diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index afcc1a664b59..86405ec2c3f1 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -231,6 +231,8 @@ class StreamInfoFormatter : public FormatterProvider { }; using FieldExtractorPtr = std::unique_ptr; + enum class StreamInfoAddressFieldExtractionType { WithPort, WithoutPort, JustPort }; + private: FieldExtractorPtr field_extractor_; }; diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index 12b5baacb349..a0f69d1cf90e 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -20,7 +20,6 @@ #include 
"common/http/headers.h" #include "common/http/utility.h" #include "common/protobuf/utility.h" -#include "common/runtime/uuid_util.h" #include "common/stream_info/utility.h" #include "common/tracing/http_tracer_impl.h" @@ -123,15 +122,14 @@ RuntimeFilter::RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& percent_(config.percent_sampled()), use_independent_randomness_(config.use_independent_randomness()) {} -bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo&, +bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo& stream_info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { - const Http::HeaderEntry* uuid = request_headers.RequestId(); + auto rid_extension = stream_info.getRequestIDExtension(); uint64_t random_value; - // TODO(dnoe): Migrate uuidModBy to take string_view (#6580) - if (use_independent_randomness_ || uuid == nullptr || - !UuidUtils::uuidModBy( - std::string(uuid->value().getStringView()), random_value, + if (use_independent_randomness_ || + !rid_extension->modBy( + request_headers, random_value, ProtobufPercentHelper::fractionalPercentDenominatorToInt(percent_.denominator()))) { random_value = random_.random(); } diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 1bdee3967d92..512a957e1dc2 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -233,7 +233,7 @@ class GrpcStatusFilter : public Filter { */ class ExtensionFilterFactory : public Config::TypedFactory { public: - virtual ~ExtensionFilterFactory() = default; + ~ExtensionFilterFactory() override = default; /** * Create a particular extension filter implementation from a config proto. 
If the diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index 20376e6c914f..534b4be0b547 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -11,6 +11,14 @@ namespace Envoy { namespace AccessLog { +AccessLogManagerImpl::~AccessLogManagerImpl() { + for (auto& access_log : access_logs_) { + ENVOY_LOG(debug, "destroying access logger {}", access_log.first); + access_log.second.reset(); + } + ENVOY_LOG(debug, "destroyed access loggers"); +} + void AccessLogManagerImpl::reopen() { for (auto& access_log : access_logs_) { access_log.second->reopen(); @@ -95,9 +103,7 @@ AccessLogFileImpl::~AccessLogFileImpl() { } void AccessLogFileImpl::doWrite(Buffer::Instance& buffer) { - uint64_t num_slices = buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer.getRawSlices(slices.begin(), num_slices); + Buffer::RawSliceVector slices = buffer.getRawSlices(); // We must do the actual writes to disk under lock, so that we don't intermix chunks from // different AccessLogFileImpl pointing to the same underlying file. 
This can happen either via diff --git a/source/common/access_log/access_log_manager_impl.h b/source/common/access_log/access_log_manager_impl.h index fa3b2579e392..2bf745cf41bf 100644 --- a/source/common/access_log/access_log_manager_impl.h +++ b/source/common/access_log/access_log_manager_impl.h @@ -11,6 +11,7 @@ #include "envoy/stats/store.h" #include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" #include "common/common/thread.h" namespace Envoy { @@ -29,7 +30,7 @@ struct AccessLogFileStats { namespace AccessLog { -class AccessLogManagerImpl : public AccessLogManager { +class AccessLogManagerImpl : public AccessLogManager, Logger::Loggable { public: AccessLogManagerImpl(std::chrono::milliseconds file_flush_interval_msec, Api::Api& api, Event::Dispatcher& dispatcher, Thread::BasicLockable& lock, @@ -38,6 +39,7 @@ class AccessLogManagerImpl : public AccessLogManager { lock_(lock), file_stats_{ ACCESS_LOG_FILE_STATS(POOL_COUNTER_PREFIX(stats_store, "filesystem."), POOL_GAUGE_PREFIX(stats_store, "filesystem."))} {} + ~AccessLogManagerImpl() override; // AccessLog::AccessLogManager void reopen() override; diff --git a/source/common/api/posix/os_sys_calls_impl.cc b/source/common/api/posix/os_sys_calls_impl.cc index 865d0dc964b5..2c00e6998772 100644 --- a/source/common/api/posix/os_sys_calls_impl.cc +++ b/source/common/api/posix/os_sys_calls_impl.cc @@ -45,11 +45,34 @@ SysCallSizeResult OsSysCallsImpl::recv(os_fd_t socket, void* buffer, size_t leng return {rc, rc != -1 ? 0 : errno}; } -SysCallSizeResult OsSysCallsImpl::recvmsg(int sockfd, msghdr* msg, int flags) { +SysCallSizeResult OsSysCallsImpl::recvmsg(os_fd_t sockfd, msghdr* msg, int flags) { const ssize_t rc = ::recvmsg(sockfd, msg, flags); return {rc, rc != -1 ? 
0 : errno}; } +SysCallIntResult OsSysCallsImpl::recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, + int flags, struct timespec* timeout) { +#if ENVOY_MMSG_MORE + const int rc = ::recvmmsg(sockfd, msgvec, vlen, flags, timeout); + return {rc, errno}; +#else + UNREFERENCED_PARAMETER(sockfd); + UNREFERENCED_PARAMETER(msgvec); + UNREFERENCED_PARAMETER(vlen); + UNREFERENCED_PARAMETER(flags); + UNREFERENCED_PARAMETER(timeout); + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +#endif +} + +bool OsSysCallsImpl::supportsMmsg() const { +#if ENVOY_MMSG_MORE + return true; +#else + return false; +#endif +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::ftruncate(fd, length); return {rc, rc != -1 ? 0 : errno}; diff --git a/source/common/api/posix/os_sys_calls_impl.h b/source/common/api/posix/os_sys_calls_impl.h index 8cd2ecb5e03c..fc63bbc07ca4 100644 --- a/source/common/api/posix/os_sys_calls_impl.h +++ b/source/common/api/posix/os_sys_calls_impl.h @@ -19,6 +19,9 @@ class OsSysCallsImpl : public OsSysCalls { SysCallSizeResult readv(os_fd_t fd, const iovec* iov, int num_iov) override; SysCallSizeResult recv(os_fd_t socket, void* buffer, size_t length, int flags) override; SysCallSizeResult recvmsg(os_fd_t sockfd, msghdr* msg, int flags) override; + SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags, + struct timespec* timeout) override; + bool supportsMmsg() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index febab4fdcf88..fa8af1a137cb 100644 --- a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -160,6 +160,16 @@ SysCallSizeResult OsSysCallsImpl::recvmsg(os_fd_t sockfd, msghdr* msg, int flags 
return {bytes_received, 0}; } +SysCallIntResult OsSysCallsImpl::recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, + int flags, struct timespec* timeout) { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +} + +bool OsSysCallsImpl::supportsMmsg() const { + // Windows doesn't support it. + return false; +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::_chsize_s(fd, length); return {rc, rc == 0 ? 0 : errno}; diff --git a/source/common/api/win32/os_sys_calls_impl.h b/source/common/api/win32/os_sys_calls_impl.h index b39a8416fa9b..1f6b56608b2b 100644 --- a/source/common/api/win32/os_sys_calls_impl.h +++ b/source/common/api/win32/os_sys_calls_impl.h @@ -19,6 +19,9 @@ class OsSysCallsImpl : public OsSysCalls { SysCallSizeResult readv(os_fd_t fd, const iovec* iov, int num_iov) override; SysCallSizeResult recv(os_fd_t socket, void* buffer, size_t length, int flags) override; SysCallSizeResult recvmsg(os_fd_t sockfd, msghdr* msg, int flags) override; + SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags, + struct timespec* timeout) override; + bool supportsMmsg() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 4aacaeefb1c9..c53a51c02bd0 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -44,10 +44,7 @@ void OwnedImpl::add(absl::string_view data) { add(data.data(), data.size()); } void OwnedImpl::add(const Instance& data) { ASSERT(&data != this); - uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); - for (const RawSlice& slice : slices) { + for (const RawSlice& slice : data.getRawSlices()) { add(slice.mem_, slice.len_); } 
} @@ -169,23 +166,32 @@ void OwnedImpl::drain(uint64_t size) { } } -uint64_t OwnedImpl::getRawSlices(RawSlice* out, uint64_t out_size) const { - uint64_t num_slices = 0; +RawSliceVector OwnedImpl::getRawSlices(absl::optional max_slices) const { + uint64_t max_out = slices_.size(); + if (max_slices.has_value()) { + max_out = std::min(max_out, max_slices.value()); + } + + RawSliceVector raw_slices; + raw_slices.reserve(max_out); for (const auto& slice : slices_) { + if (raw_slices.size() >= max_out) { + break; + } + if (slice->dataSize() == 0) { continue; } - if (num_slices < out_size) { - out[num_slices].mem_ = slice->data(); - out[num_slices].len_ = slice->dataSize(); - } - // Per the definition of getRawSlices in include/envoy/buffer/buffer.h, we need to return - // the total number of slices needed to access all the data in the buffer, which can be - // larger than out_size. So we keep iterating and counting non-empty slices here, even - // if all the caller-supplied slices have been filled. - num_slices++; + + // Temporary cast to fix 32-bit Envoy mobile builds, where sizeof(uint64_t) != sizeof(size_t). + // dataSize represents the size of a buffer so size_t should always be large enough to hold its + // size regardless of architecture. Buffer slices should in practice be relatively small, but + // there is currently no max size validation. + // TODO(antoniovicente) Set realistic limits on the max size of BufferSlice and consider use of + // size_t instead of uint64_t in the Slice interface. 
+ raw_slices.emplace_back(RawSlice{slice->data(), static_cast(slice->dataSize())}); } - return num_slices; + return raw_slices; } uint64_t OwnedImpl::length() const { @@ -470,9 +476,8 @@ bool OwnedImpl::startsWith(absl::string_view data) const { Api::IoCallUint64Result OwnedImpl::write(Network::IoHandle& io_handle) { constexpr uint64_t MaxSlices = 16; - RawSlice slices[MaxSlices]; - const uint64_t num_slices = std::min(getRawSlices(slices, MaxSlices), MaxSlices); - Api::IoCallUint64Result result = io_handle.writev(slices, num_slices); + RawSliceVector slices = getRawSlices(MaxSlices); + Api::IoCallUint64Result result = io_handle.writev(slices.begin(), slices.size()); if (result.ok() && result.rc_ > 0) { drain(static_cast(result.rc_)); } @@ -488,16 +493,9 @@ OwnedImpl::OwnedImpl(const Instance& data) : OwnedImpl() { add(data); } OwnedImpl::OwnedImpl(const void* data, uint64_t size) : OwnedImpl() { add(data, size); } std::string OwnedImpl::toString() const { - uint64_t num_slices = getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - getRawSlices(slices.begin(), num_slices); - size_t len = 0; - for (const RawSlice& slice : slices) { - len += slice.len_; - } std::string output; - output.reserve(len); - for (const RawSlice& slice : slices) { + output.reserve(length()); + for (const RawSlice& slice : getRawSlices()) { output.append(static_cast(slice.mem_), slice.len_); } diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 4dcc3b3550da..7da3adb82195 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -519,7 +519,7 @@ class OwnedImpl : public LibEventInstance { void commit(RawSlice* iovecs, uint64_t num_iovecs) override; void copyOut(size_t start, uint64_t size, void* data) const override; void drain(uint64_t size) override; - uint64_t getRawSlices(RawSlice* out, uint64_t out_size) const override; + RawSliceVector getRawSlices(absl::optional max_slices = absl::nullopt) const 
override; uint64_t length() const override; void* linearize(uint32_t size) override; void move(Instance& rhs) override; diff --git a/source/common/buffer/zero_copy_input_stream_impl.cc b/source/common/buffer/zero_copy_input_stream_impl.cc index 9159045b5c33..e94e36799b52 100644 --- a/source/common/buffer/zero_copy_input_stream_impl.cc +++ b/source/common/buffer/zero_copy_input_stream_impl.cc @@ -25,10 +25,10 @@ bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { position_ = 0; } - Buffer::RawSlice slice; - const uint64_t num_slices = buffer_->getRawSlices(&slice, 1); + Buffer::RawSliceVector slices = buffer_->getRawSlices(1); - if (num_slices > 0 && slice.len_ > 0) { + if (!slices.empty() && slices[0].len_ > 0) { + auto& slice = slices[0]; *data = slice.mem_; *size = slice.len_; position_ = slice.len_; diff --git a/source/common/common/BUILD b/source/common/common/BUILD index a4dd7f32b608..633ff831feab 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -419,3 +419,13 @@ envoy_cc_library( "//source/common/common:utility_lib", ], ) + +envoy_cc_library( + name = "zlib_base_lib", + srcs = ["zlib/base.cc"], + hdrs = ["zlib/base.h"], + external_deps = ["zlib"], + deps = [ + "//source/common/buffer:buffer_lib", + ], +) diff --git a/source/common/common/base64.cc b/source/common/common/base64.cc index b927f409e697..fc4cd599c20d 100644 --- a/source/common/common/base64.cc +++ b/source/common/common/base64.cc @@ -194,13 +194,9 @@ std::string Base64::encode(const Buffer::Instance& buffer, uint64_t length) { std::string ret; ret.reserve(output_length); - uint64_t num_slices = buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer.getRawSlices(slices.begin(), num_slices); - uint64_t j = 0; uint8_t next_c = 0; - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : buffer.getRawSlices()) { const uint8_t* slice_mem = static_cast(slice.mem_); for (uint64_t i = 0; i < slice.len_ && j < 
length; ++i, ++j) { diff --git a/source/common/common/hash.h b/source/common/common/hash.h index 8be939356001..29c007274208 100644 --- a/source/common/common/hash.h +++ b/source/common/common/hash.h @@ -122,8 +122,8 @@ struct HeterogeneousStringEqual { using SharedStringSet = absl::flat_hash_set; -template -using StringMap = - absl::flat_hash_map; +// A special heterogeneous comparator is not needed for maps of strings; absl +// hashes allow for looking up a string-container with a string-view by default. +template using StringMap = absl::flat_hash_map; } // namespace Envoy diff --git a/source/common/common/logger.cc b/source/common/common/logger.cc index 3fe1fd020974..3816c0f61ca7 100644 --- a/source/common/common/logger.cc +++ b/source/common/common/logger.cc @@ -53,7 +53,7 @@ void DelegatingLogSink::log(const spdlog::details::log_msg& msg) { // This memory buffer must exist in the scope of the entire function, // otherwise the string_view will refer to memory that is already free. - fmt::memory_buffer formatted; + spdlog::memory_buf_t formatted; if (formatter_) { formatter_->format(msg, formatted); msg_view = absl::string_view(formatted.data(), formatted.size()); diff --git a/source/common/common/mem_block_builder.h b/source/common/common/mem_block_builder.h index ce0c6827dc8b..a43f7e885e22 100644 --- a/source/common/common/mem_block_builder.h +++ b/source/common/common/mem_block_builder.h @@ -27,7 +27,7 @@ template class MemBlockBuilder { // Constructs a MemBlockBuilder allowing for 'capacity' instances of T. 
explicit MemBlockBuilder(uint64_t capacity) : data_(std::make_unique(capacity)), write_span_(data_.get(), capacity) {} - MemBlockBuilder() {} + MemBlockBuilder() = default; /** * Allocates (or reallocates) memory for the MemBlockBuilder to make it the diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index fa6d89d3fb29..7a3656f47ba7 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -416,9 +416,11 @@ std::string StringUtil::escape(const std::string& source) { return ret; } -std::string AccessLogDateTimeFormatter::fromTime(const SystemTime& system_time) { - static const std::string DefaultDateFormat = "%Y-%m-%dT%H:%M:%E3SZ"; +const std::string& getDefaultDateFormat() { + CONSTRUCT_ON_FIRST_USE(std::string, "%Y-%m-%dT%H:%M:%E3SZ"); +} +std::string AccessLogDateTimeFormatter::fromTime(const SystemTime& system_time) { struct CachedTime { std::chrono::seconds epoch_time_seconds; std::string formatted_time; @@ -432,8 +434,8 @@ std::string AccessLogDateTimeFormatter::fromTime(const SystemTime& system_time) std::chrono::duration_cast(epoch_time_ms); if (cached_time.formatted_time.empty() || cached_time.epoch_time_seconds != epoch_time_seconds) { - cached_time.formatted_time = - absl::FormatTime(DefaultDateFormat, absl::FromChrono(system_time), absl::UTCTimeZone()); + cached_time.formatted_time = absl::FormatTime( + getDefaultDateFormat(), absl::FromChrono(system_time), absl::UTCTimeZone()); cached_time.epoch_time_seconds = epoch_time_seconds; } else { // Overwrite the digits in the ".000Z" at the end of the string with the diff --git a/source/common/common/zlib/base.cc b/source/common/common/zlib/base.cc new file mode 100644 index 000000000000..5336f35f8735 --- /dev/null +++ b/source/common/common/zlib/base.cc @@ -0,0 +1,24 @@ +#include "common/common/zlib/base.h" + +namespace Envoy { +namespace Zlib { + +Base::Base(uint64_t chunk_size, std::function zstream_deleter) + : chunk_size_{chunk_size}, 
chunk_char_ptr_(new unsigned char[chunk_size]), + zstream_ptr_(new z_stream(), zstream_deleter) {} + +uint64_t Base::checksum() { return zstream_ptr_->adler; } + +void Base::updateOutput(Buffer::Instance& output_buffer) { + const uint64_t n_output = chunk_size_ - zstream_ptr_->avail_out; + if (n_output == 0) { + return; + } + + output_buffer.add(static_cast(chunk_char_ptr_.get()), n_output); + zstream_ptr_->avail_out = chunk_size_; + zstream_ptr_->next_out = chunk_char_ptr_.get(); +} + +} // namespace Zlib +} // namespace Envoy diff --git a/source/common/common/zlib/base.h b/source/common/common/zlib/base.h new file mode 100644 index 000000000000..4f427fb90985 --- /dev/null +++ b/source/common/common/zlib/base.h @@ -0,0 +1,40 @@ +#pragma once + +#include + +#include "envoy/buffer/buffer.h" + +#include "zlib.h" + +namespace Envoy { +namespace Zlib { + +/** + * Shared code between the compressor and the decompressor. + */ +class Base { +public: + Base(uint64_t chunk_size, std::function zstream_deleter); + + /** + * It returns the checksum of all output produced so far. Compressor's checksum at the end of + * the stream has to match decompressor's checksum produced at the end of the decompression. + * Likewise, the decompressor's checksum has to match the compressor's checksum at the end of + * compression. + * @return uint64_t CRC-32 if a gzip stream is being read or Adler-32 for other compression + * types. 
+ */ + uint64_t checksum(); + +protected: + void updateOutput(Buffer::Instance& output_buffer); + + const uint64_t chunk_size_; + bool initialized_{false}; + + const std::unique_ptr chunk_char_ptr_; + const std::unique_ptr> zstream_ptr_; +}; + +} // namespace Zlib +} // namespace Envoy diff --git a/source/common/compressor/BUILD b/source/common/compressor/BUILD index 8e37a01a23fb..d452e1c968f7 100644 --- a/source/common/compressor/BUILD +++ b/source/common/compressor/BUILD @@ -17,5 +17,6 @@ envoy_cc_library( "//include/envoy/compressor:compressor_interface", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:zlib_base_lib", ], ) diff --git a/source/common/compressor/zlib_compressor_impl.cc b/source/common/compressor/zlib_compressor_impl.cc index 749b58e96193..2f44a5da1a90 100644 --- a/source/common/compressor/zlib_compressor_impl.cc +++ b/source/common/compressor/zlib_compressor_impl.cc @@ -14,8 +14,7 @@ namespace Compressor { ZlibCompressorImpl::ZlibCompressorImpl() : ZlibCompressorImpl(4096) {} ZlibCompressorImpl::ZlibCompressorImpl(uint64_t chunk_size) - : chunk_size_{chunk_size}, initialized_{false}, chunk_char_ptr_(new unsigned char[chunk_size]), - zstream_ptr_(new z_stream(), [](z_stream* z) { + : Zlib::Base(chunk_size, [](z_stream* z) { deflateEnd(z); delete z; }) { @@ -35,14 +34,8 @@ void ZlibCompressorImpl::init(CompressionLevel comp_level, CompressionStrategy c initialized_ = true; } -uint64_t ZlibCompressorImpl::checksum() { return zstream_ptr_->adler; } - void ZlibCompressorImpl::compress(Buffer::Instance& buffer, State state) { - const uint64_t num_slices = buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer.getRawSlices(slices.begin(), num_slices); - - for (const Buffer::RawSlice& input_slice : slices) { + for (const Buffer::RawSlice& input_slice : buffer.getRawSlices()) { zstream_ptr_->avail_in = input_slice.len_; zstream_ptr_->next_in = static_cast(input_slice.mem_); // 
Z_NO_FLUSH tells the compressor to take the data in and compresses it as much as possible @@ -87,15 +80,5 @@ void ZlibCompressorImpl::process(Buffer::Instance& output_buffer, int64_t flush_ } } -void ZlibCompressorImpl::updateOutput(Buffer::Instance& output_buffer) { - const uint64_t n_output = chunk_size_ - zstream_ptr_->avail_out; - if (n_output > 0) { - output_buffer.add(static_cast(chunk_char_ptr_.get()), n_output); - } - chunk_char_ptr_ = std::make_unique(chunk_size_); - zstream_ptr_->avail_out = chunk_size_; - zstream_ptr_->next_out = chunk_char_ptr_.get(); -} - } // namespace Compressor } // namespace Envoy diff --git a/source/common/compressor/zlib_compressor_impl.h b/source/common/compressor/zlib_compressor_impl.h index ecfdc8aa4163..396e7ff250aa 100644 --- a/source/common/compressor/zlib_compressor_impl.h +++ b/source/common/compressor/zlib_compressor_impl.h @@ -2,6 +2,8 @@ #include "envoy/compressor/compressor.h" +#include "common/common/zlib/base.h" + #include "zlib.h" namespace Envoy { @@ -10,7 +12,7 @@ namespace Compressor { /** * Implementation of compressor's interface. */ -class ZlibCompressorImpl : public Compressor { +class ZlibCompressorImpl : public Zlib::Base, public Compressor { public: ZlibCompressorImpl(); @@ -32,9 +34,9 @@ class ZlibCompressorImpl : public Compressor { * manual. */ enum class CompressionLevel : int64_t { - Best = 9, - Speed = 1, - Standard = -1, + Best = Z_BEST_COMPRESSION, + Speed = Z_BEST_SPEED, + Standard = Z_DEFAULT_COMPRESSION, }; /** @@ -45,10 +47,10 @@ class ZlibCompressorImpl : public Compressor { * standard: used for normal data. (default) @see Z_DEFAULT_STRATEGY in zlib manual. 
*/ enum class CompressionStrategy : uint64_t { - Filtered = 1, - Huffman = 2, - Rle = 3, - Standard = 4, + Filtered = Z_FILTERED, + Huffman = Z_HUFFMAN_ONLY, + Rle = Z_RLE, + Standard = Z_DEFAULT_STRATEGY, }; /** @@ -64,27 +66,12 @@ class ZlibCompressorImpl : public Compressor { void init(CompressionLevel level, CompressionStrategy strategy, int64_t window_bits, uint64_t memory_level); - /** - * It returns the checksum of all output produced so far. Compressor's checksum at the end of the - * stream has to match decompressor's checksum produced at the end of the decompression. - * @return uint64_t CRC-32 if a gzip stream is being written or Adler-32 for other compression - * types. - */ - uint64_t checksum(); - // Compressor void compress(Buffer::Instance& buffer, State state) override; private: bool deflateNext(int64_t flush_state); void process(Buffer::Instance& output_buffer, int64_t flush_state); - void updateOutput(Buffer::Instance& output_buffer); - - const uint64_t chunk_size_; - bool initialized_; - - std::unique_ptr chunk_char_ptr_; - std::unique_ptr> zstream_ptr_; }; } // namespace Compressor diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 082f1cc30f50..aa386f3c5d66 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -251,6 +251,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "resource_name_lib", + hdrs = ["resource_name.h"], + deps = [ + ":api_type_oracle_lib", + "//source/common/common:assert_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "resources_lib", hdrs = ["resources.h"], @@ -361,6 +371,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "subscription_base_interface", + hdrs = ["subscription_base.h"], + deps = [ + ":resource_name_lib", + "//include/envoy/config:subscription_interface", + ], +) + envoy_cc_library( name = "well_known_names", srcs = ["well_known_names.cc"], diff --git a/source/common/config/grpc_mux_impl.cc 
b/source/common/config/grpc_mux_impl.cc index c72bb7062d0e..1e495a2a7f94 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -244,4 +244,4 @@ void GrpcMuxImpl::drainRequests() { } } // namespace Config -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index fe0ff5bf1ae2..c7d63a93d01d 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -76,7 +76,7 @@ class NewGrpcMuxImpl WatchImpl(const std::string& type_url, Watch* watch, NewGrpcMuxImpl& parent) : type_url_(type_url), watch_(watch), parent_(parent) {} - ~WatchImpl() { remove(); } + ~WatchImpl() override { remove(); } void remove() { if (watch_) { diff --git a/source/common/config/remote_data_fetcher.cc b/source/common/config/remote_data_fetcher.cc index 1123581a533e..2572e0091389 100644 --- a/source/common/config/remote_data_fetcher.cc +++ b/source/common/config/remote_data_fetcher.cc @@ -39,7 +39,8 @@ void RemoteDataFetcher::fetch() { DurationUtil::durationToMilliseconds(uri_.timeout())))); } -void RemoteDataFetcher::onSuccess(Http::ResponseMessagePtr&& response) { +void RemoteDataFetcher::onSuccess(const Http::AsyncClient::Request&, + Http::ResponseMessagePtr&& response) { const uint64_t status_code = Http::Utility::getResponseStatus(response->headers()); if (status_code == enumToInt(Http::Code::OK)) { ENVOY_LOG(debug, "fetch remote data [uri = {}]: success", uri_.uri()); @@ -66,7 +67,8 @@ void RemoteDataFetcher::onSuccess(Http::ResponseMessagePtr&& response) { request_ = nullptr; } -void RemoteDataFetcher::onFailure(Http::AsyncClient::FailureReason reason) { +void RemoteDataFetcher::onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) { ENVOY_LOG(debug, "fetch remote data [uri = {}]: network error {}", uri_.uri(), enumToInt(reason)); request_ = nullptr; 
callback_.onFailure(FailureReason::Network); diff --git a/source/common/config/remote_data_fetcher.h b/source/common/config/remote_data_fetcher.h index ced327a9fa37..34a7863ff2f0 100644 --- a/source/common/config/remote_data_fetcher.h +++ b/source/common/config/remote_data_fetcher.h @@ -50,8 +50,9 @@ class RemoteDataFetcher : public Logger::Loggable, ~RemoteDataFetcher() override; // Http::AsyncClient::Callbacks - void onSuccess(Http::ResponseMessagePtr&& response) override; - void onFailure(Http::AsyncClient::FailureReason reason) override; + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override; /** * Fetch data from remote. diff --git a/source/common/config/resource_name.h b/source/common/config/resource_name.h new file mode 100644 index 000000000000..b21a7d47d710 --- /dev/null +++ b/source/common/config/resource_name.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include + +#include "envoy/config/core/v3/config_source.pb.h" + +#include "common/common/assert.h" +#include "common/config/api_type_oracle.h" + +namespace Envoy { +namespace Config { + +/** + * Get resource name from api type and version. + */ +template +std::string getResourceName(envoy::config::core::v3::ApiVersion resource_api_version) { + switch (resource_api_version) { + case envoy::config::core::v3::ApiVersion::AUTO: + case envoy::config::core::v3::ApiVersion::V2: + return ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name()) + .value(); + case envoy::config::core::v3::ApiVersion::V3: + return Current().GetDescriptor()->full_name(); + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +/** + * Get type url from api type and version. 
+ */ +template +std::string getTypeUrl(envoy::config::core::v3::ApiVersion resource_api_version) { + return "type.googleapis.com/" + getResourceName(resource_api_version); +} + +/** + * get all version resource names. + */ +template std::vector getAllVersionResourceNames() { + return std::vector{ + Current().GetDescriptor()->full_name(), + ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name()) + .value()}; +} + +/** + * get all version type urls. + */ +template std::vector getAllVersionTypeUrls() { + auto resource_names = getAllVersionResourceNames(); + for (auto&& resource_name : resource_names) { + resource_name = "type.googleapis.com/" + resource_name; + } + return resource_names; +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/subscription_base.h b/source/common/config/subscription_base.h new file mode 100644 index 000000000000..dd5686f2ffaf --- /dev/null +++ b/source/common/config/subscription_base.h @@ -0,0 +1,24 @@ +#pragma once + +#include "envoy/config/subscription.h" + +#include "common/config/resource_name.h" + +namespace Envoy { +namespace Config { + +template struct SubscriptionBase : public Config::SubscriptionCallbacks { +public: + SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version) + : api_version_(api_version) {} + + std::string getResourceName() const { + return Envoy::Config::getResourceName(api_version_); + } + +private: + const envoy::config::core::v3::ApiVersion api_version_; +}; + +} // namespace Config +} // namespace Envoy \ No newline at end of file diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index d86148d83ac8..93a5c9035930 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -16,7 +16,6 @@ #include "common/common/hex.h" #include "common/common/utility.h" #include "common/config/api_type_oracle.h" -#include "common/config/resources.h" #include "common/config/version_converter.h" 
#include "common/config/well_known_names.h" #include "common/protobuf/protobuf.h" @@ -60,13 +59,13 @@ void Utility::translateApiConfigSource( } void Utility::checkCluster(absl::string_view error_prefix, absl::string_view cluster_name, - Upstream::ClusterManager& cm) { + Upstream::ClusterManager& cm, bool allow_added_via_api) { Upstream::ThreadLocalCluster* cluster = cm.get(cluster_name); if (cluster == nullptr) { throw EnvoyException(fmt::format("{}: unknown cluster '{}'", error_prefix, cluster_name)); } - if (cluster->info()->addedViaApi()) { + if (!allow_added_via_api && cluster->info()->addedViaApi()) { throw EnvoyException(fmt::format("{}: invalid cluster '{}': currently only " "static (non-CDS) clusters are supported", error_prefix, cluster_name)); diff --git a/source/common/config/utility.h b/source/common/config/utility.h index ce7b0040fd61..7ad472d779fa 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -114,9 +114,11 @@ class Utility { * @param error_prefix supplies the prefix to use in error messages. * @param cluster_name supplies the cluster name to check. * @param cm supplies the cluster manager. + * @param allow_added_via_api indicates whether a cluster is allowed to be added via api + * rather than be a static resource from the bootstrap config. */ static void checkCluster(absl::string_view error_prefix, absl::string_view cluster_name, - Upstream::ClusterManager& cm); + Upstream::ClusterManager& cm, bool allow_added_via_api = false); /** * Check cluster/local info for API config sanity. Throws on error. 
diff --git a/source/common/decompressor/BUILD b/source/common/decompressor/BUILD index 29d7e7973795..dfdf8f9b90ed 100644 --- a/source/common/decompressor/BUILD +++ b/source/common/decompressor/BUILD @@ -18,5 +18,6 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", + "//source/common/common:zlib_base_lib", ], ) diff --git a/source/common/decompressor/zlib_decompressor_impl.cc b/source/common/decompressor/zlib_decompressor_impl.cc index de15a52b3651..55dffc6d3609 100644 --- a/source/common/decompressor/zlib_decompressor_impl.cc +++ b/source/common/decompressor/zlib_decompressor_impl.cc @@ -14,8 +14,7 @@ namespace Decompressor { ZlibDecompressorImpl::ZlibDecompressorImpl() : ZlibDecompressorImpl(4096) {} ZlibDecompressorImpl::ZlibDecompressorImpl(uint64_t chunk_size) - : chunk_size_{chunk_size}, initialized_{false}, chunk_char_ptr_(new unsigned char[chunk_size]), - zstream_ptr_(new z_stream(), [](z_stream* z) { + : Zlib::Base(chunk_size, [](z_stream* z) { inflateEnd(z); delete z; }) { @@ -33,15 +32,9 @@ void ZlibDecompressorImpl::init(int64_t window_bits) { initialized_ = true; } -uint64_t ZlibDecompressorImpl::checksum() { return zstream_ptr_->adler; } - void ZlibDecompressorImpl::decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) { - const uint64_t num_slices = input_buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - input_buffer.getRawSlices(slices.begin(), num_slices); - - for (const Buffer::RawSlice& input_slice : slices) { + for (const Buffer::RawSlice& input_slice : input_buffer.getRawSlices()) { zstream_ptr_->avail_in = input_slice.len_; zstream_ptr_->next_in = static_cast(input_slice.mem_); while (inflateNext()) { @@ -81,15 +74,5 @@ bool ZlibDecompressorImpl::inflateNext() { return true; } -void ZlibDecompressorImpl::updateOutput(Buffer::Instance& output_buffer) { - const uint64_t n_output = chunk_size_ - 
zstream_ptr_->avail_out; - if (n_output > 0) { - output_buffer.add(static_cast(chunk_char_ptr_.get()), n_output); - } - chunk_char_ptr_ = std::make_unique(chunk_size_); - zstream_ptr_->avail_out = chunk_size_; - zstream_ptr_->next_out = chunk_char_ptr_.get(); -} - } // namespace Decompressor } // namespace Envoy diff --git a/source/common/decompressor/zlib_decompressor_impl.h b/source/common/decompressor/zlib_decompressor_impl.h index dcb08347be1a..8d5627fc6c31 100644 --- a/source/common/decompressor/zlib_decompressor_impl.h +++ b/source/common/decompressor/zlib_decompressor_impl.h @@ -3,6 +3,7 @@ #include "envoy/decompressor/decompressor.h" #include "common/common/logger.h" +#include "common/common/zlib/base.h" #include "zlib.h" @@ -12,7 +13,8 @@ namespace Decompressor { /** * Implementation of decompressor's interface. */ -class ZlibDecompressorImpl : public Decompressor, +class ZlibDecompressorImpl : public Zlib::Base, + public Decompressor, public Logger::Loggable { public: ZlibDecompressorImpl(); @@ -35,14 +37,6 @@ class ZlibDecompressorImpl : public Decompressor, */ void init(int64_t window_bits); - /** - * It returns the checksum of all output produced so far. Decompressor's checksum at the end of - * the stream has to match compressor's checksum produced at the end of the compression. - * @return uint64_t CRC-32 if a gzip stream is being read or Adler-32 for other compression - * types. 
- */ - uint64_t checksum(); - // Decompressor void decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) override; @@ -52,13 +46,6 @@ class ZlibDecompressorImpl : public Decompressor, private: bool inflateNext(); - void updateOutput(Buffer::Instance& output_buffer); - - const uint64_t chunk_size_; - bool initialized_; - - std::unique_ptr chunk_char_ptr_; - std::unique_ptr> zstream_ptr_; }; } // namespace Decompressor diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 274c111e1911..65085afdaf43 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -101,10 +101,11 @@ void DispatcherImpl::clearDeferredDeleteList() { Network::ConnectionPtr DispatcherImpl::createServerConnection(Network::ConnectionSocketPtr&& socket, - Network::TransportSocketPtr&& transport_socket) { + Network::TransportSocketPtr&& transport_socket, + StreamInfo::StreamInfo& stream_info) { ASSERT(isThreadSafe()); return std::make_unique(*this, std::move(socket), - std::move(transport_socket), true); + std::move(transport_socket), stream_info, true); } Network::ClientConnectionPtr diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 81800f01c0d1..51a667639522 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -44,9 +44,9 @@ class DispatcherImpl : Logger::Loggable, TimeSource& timeSource() override { return api_.timeSource(); } void initializeStats(Stats::Scope& scope, const std::string& prefix) override; void clearDeferredDeleteList() override; - Network::ConnectionPtr - createServerConnection(Network::ConnectionSocketPtr&& socket, - Network::TransportSocketPtr&& transport_socket) override; + Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, + Network::TransportSocketPtr&& transport_socket, + StreamInfo::StreamInfo& stream_info) override; 
Network::ClientConnectionPtr createClientConnection(Network::Address::InstanceConstSharedPtr address, Network::Address::InstanceConstSharedPtr source_address, diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index e7ac463c8342..7607551fc99a 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -62,8 +62,12 @@ void FileEventImpl::assignEvents(uint32_t events, event_base* base) { events |= FileReadyType::Closed; } - ASSERT(events); - event->cb_(events); + // TODO(htuch): this should be ASSERT(events), but + // https://github.com/libevent/libevent/issues/984 seems to be producing unexpected + // behavior. The ASSERT should be restored once this issue is resolved. + if (events) { + event->cb_(events); + } }, this); } diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index 2db1bb0e56d6..d3e6bd48f69c 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -80,9 +80,14 @@ void WatcherImpl::onInotifyEvent() { } for (FileWatch& watch : callback_map_[file_event->wd].watches_) { - if (watch.file_ == file && (watch.events_ & events)) { - ENVOY_LOG(debug, "matched callback: file: {}", file); - watch.cb_(events); + if (watch.events_ & events) { + if (watch.file_ == file) { + ENVOY_LOG(debug, "matched callback: file: {}", file); + watch.cb_(events); + } else if (watch.file_.empty()) { + ENVOY_LOG(debug, "matched callback: directory: {}", file); + watch.cb_(events); + } } } diff --git a/source/common/filesystem/kqueue/watcher_impl.cc b/source/common/filesystem/kqueue/watcher_impl.cc index c9d3eecc14a7..aa1589f0cb8a 100644 --- a/source/common/filesystem/kqueue/watcher_impl.cc +++ b/source/common/filesystem/kqueue/watcher_impl.cc @@ -65,9 +65,6 @@ WatcherImpl::FileWatchPtr WatcherImpl::addWatch(absl::string_view path, uint32_t watch->watching_dir_ = watching_dir; u_int 
flags = NOTE_DELETE | NOTE_RENAME | NOTE_WRITE; - if (watching_dir) { - flags = NOTE_DELETE | NOTE_WRITE; - } struct kevent event; EV_SET(&event, watch_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, flags, 0, @@ -109,6 +106,8 @@ void WatcherImpl::onKqueueEvent() { ASSERT(file != nullptr); ASSERT(watch_fd == file->fd_); + auto pathname = api_.fileSystem().splitPathFromFilename(file->file_); + if (file->watching_dir_) { if (event.fflags & NOTE_DELETE) { // directory was deleted @@ -126,6 +125,10 @@ void WatcherImpl::onKqueueEvent() { events |= Events::MovedTo; } } + } else if (pathname.file_.empty()) { + if (event.fflags & NOTE_WRITE) { + events |= Events::MovedTo; + } } else { // kqueue doesn't seem to work well with NOTE_RENAME and O_SYMLINK, so instead if we // get a NOTE_DELETE on the symlink we check if there is another file with the same diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index 3280a70493f6..29f31e66d444 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -107,6 +107,7 @@ envoy_cc_library( hdrs = ["context_impl.h"], external_deps = ["abseil_optional"], deps = [ + ":common_lib", ":stat_names_lib", "//include/envoy/grpc:context_interface", "//include/envoy/http:header_map_interface", diff --git a/source/common/grpc/codec.cc b/source/common/grpc/codec.cc index 9daa9b1ea5e3..4ac61523d315 100644 --- a/source/common/grpc/codec.cc +++ b/source/common/grpc/codec.cc @@ -67,11 +67,8 @@ void Decoder::frameDataEnd() { } uint64_t FrameInspector::inspect(const Buffer::Instance& data) { - uint64_t count = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(count); - data.getRawSlices(slices.begin(), count); uint64_t delta = 0; - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { uint8_t* mem = reinterpret_cast(slice.mem_); for (uint64_t j = 0; j < slice.len_;) { uint8_t c = *mem; diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 
b2f42f1bc09f..2019f54ac874 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -297,5 +297,21 @@ bool Common::parseBufferInstance(Buffer::InstancePtr&& buffer, Protobuf::Message return proto.ParseFromZeroCopyStream(&stream); } +absl::optional +Common::resolveServiceAndMethod(const Http::HeaderEntry* path) { + absl::optional request_names; + if (path == nullptr) { + return request_names; + } + absl::string_view str = path->value().getStringView(); + str = str.substr(0, str.find('?')); + const auto parts = StringUtil::splitToken(str, "/"); + if (parts.size() != 2) { + return request_names; + } + request_names = RequestNames{parts[0], parts[1]}; + return request_names; +} + } // namespace Grpc } // namespace Envoy diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index 732336bff53c..b450e7817e54 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -153,6 +153,20 @@ class Common { */ static bool parseBufferInstance(Buffer::InstancePtr&& buffer, Protobuf::Message& proto); + struct RequestNames { + absl::string_view service_; + absl::string_view method_; + }; + + /** + * Resolve the gRPC service and method from the HTTP2 :path header. + * @param path supplies the :path header. + * @return if both gRPC serve and method have been resolved successfully returns + * a populated RequestNames, otherwise returns an empty optional. + * @note The return value is only valid as long as `path` is still valid and unmodified. 
+ */ + static absl::optional resolveServiceAndMethod(const Http::HeaderEntry* path); + private: static void checkForHeaderOnlyError(Http::ResponseMessage& http_response); }; diff --git a/source/common/grpc/context_impl.cc b/source/common/grpc/context_impl.cc index f75d31b30703..04c56ddef1b3 100644 --- a/source/common/grpc/context_impl.cc +++ b/source/common/grpc/context_impl.cc @@ -3,6 +3,8 @@ #include #include +#include "common/grpc/common.h" + namespace Envoy { namespace Grpc { @@ -32,8 +34,22 @@ Stats::StatName ContextImpl::makeDynamicStatName(absl::string_view name) { return stat_name; } +// Gets the stat prefix and underlying storage, depending on whether request_names is empty +std::pair +ContextImpl::getPrefix(Protocol protocol, const absl::optional& request_names) { + const Stats::StatName protocolName = protocolStatName(protocol); + if (request_names) { + Stats::SymbolTable::StoragePtr prefix_storage = + symbol_table_.join({protocolName, request_names->service_, request_names->method_}); + Stats::StatName prefix = Stats::StatName(prefix_storage.get()); + return {prefix, std::move(prefix_storage)}; + } else { + return {protocolName, nullptr}; + } +} + void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, - const RequestNames& request_names, + const absl::optional& request_names, const Http::HeaderEntry* grpc_status) { if (!grpc_status) { return; @@ -44,18 +60,19 @@ void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol prot const Stats::StatName status_stat_name = (iter != stat_names_.status_names_.end()) ? iter->second : makeDynamicStatName(status_str); const Stats::SymbolTable::StoragePtr stat_name_storage = - symbol_table_.join({protocolStatName(protocol), request_names.service_, request_names.method_, - status_stat_name}); + request_names ? 
symbol_table_.join({protocolStatName(protocol), request_names->service_, + request_names->method_, status_stat_name}) + : symbol_table_.join({protocolStatName(protocol), status_stat_name}); cluster.statsScope().counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); chargeStat(cluster, protocol, request_names, (status_str == "0")); } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, - const RequestNames& request_names, bool success) { - const Stats::SymbolTable::StoragePtr prefix_storage = symbol_table_.join( - {protocolStatName(protocol), request_names.service_, request_names.method_}); - const Stats::StatName prefix(prefix_storage.get()); + const absl::optional& request_names, bool success) { + auto prefix_and_storage = getPrefix(protocol, request_names); + Stats::StatName prefix = prefix_and_storage.first; + const Stats::SymbolTable::StoragePtr status = symbol_table_.join({prefix, successStatName(success)}); const Stats::SymbolTable::StoragePtr total = symbol_table_.join({prefix, total_}); @@ -65,15 +82,16 @@ void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol prot } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, bool success) { + const absl::optional& request_names, bool success) { chargeStat(cluster, Protocol::Grpc, request_names, success); } void ContextImpl::chargeRequestMessageStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, uint64_t amount) { - const Stats::SymbolTable::StoragePtr prefix_storage = symbol_table_.join( - {protocolStatName(Protocol::Grpc), request_names.service_, request_names.method_}); - const Stats::StatName prefix(prefix_storage.get()); + const absl::optional& request_names, + uint64_t amount) { + auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); + Stats::StatName prefix = prefix_and_storage.first; + const Stats::SymbolTable::StoragePtr request_message_count = 
symbol_table_.join({prefix, request_message_count_}); @@ -83,10 +101,11 @@ void ContextImpl::chargeRequestMessageStat(const Upstream::ClusterInfo& cluster, } void ContextImpl::chargeResponseMessageStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, uint64_t amount) { - const Stats::SymbolTable::StoragePtr prefix_storage = symbol_table_.join( - {protocolStatName(Protocol::Grpc), request_names.service_, request_names.method_}); - const Stats::StatName prefix(prefix_storage.get()); + const absl::optional& request_names, + uint64_t amount) { + auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); + Stats::StatName prefix = prefix_and_storage.first; + const Stats::SymbolTable::StoragePtr response_message_count = symbol_table_.join({prefix, response_message_count_}); @@ -95,22 +114,16 @@ void ContextImpl::chargeResponseMessageStat(const Upstream::ClusterInfo& cluster .add(amount); } -absl::optional -ContextImpl::resolveServiceAndMethod(const Http::HeaderEntry* path) { - absl::optional request_names; - if (path == nullptr) { - return request_names; +absl::optional +ContextImpl::resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) { + absl::optional request_names = Common::resolveServiceAndMethod(path); + if (!request_names) { + return {}; } - absl::string_view str = path->value().getStringView(); - str = str.substr(0, str.find('?')); - const auto parts = StringUtil::splitToken(str, "/"); - if (parts.size() != 2) { - return request_names; - } - const Stats::StatName service = makeDynamicStatName(parts[0]); - const Stats::StatName method = makeDynamicStatName(parts[1]); - request_names = RequestNames{service, method}; - return request_names; + + const Stats::StatName service = makeDynamicStatName(request_names->service_); + const Stats::StatName method = makeDynamicStatName(request_names->method_); + return RequestStatNames{service, method}; } } // namespace Grpc diff --git a/source/common/grpc/context_impl.h 
b/source/common/grpc/context_impl.h index 431799eabcd6..e220802eefb5 100644 --- a/source/common/grpc/context_impl.h +++ b/source/common/grpc/context_impl.h @@ -15,7 +15,7 @@ namespace Envoy { namespace Grpc { -struct Context::RequestNames { +struct Context::RequestStatNames { Stats::StatName service_; // supplies the service name. Stats::StatName method_; // supplies the method name. }; @@ -26,24 +26,27 @@ class ContextImpl : public Context { // Context void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, - const RequestNames& request_names, const Http::HeaderEntry* grpc_status) override; + const absl::optional& request_names, + const Http::HeaderEntry* grpc_status) override; void chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, - const RequestNames& request_names, bool success) override; - void chargeStat(const Upstream::ClusterInfo& cluster, const RequestNames& request_names, - bool success) override; + const absl::optional& request_names, bool success) override; + void chargeStat(const Upstream::ClusterInfo& cluster, + const absl::optional& request_names, bool success) override; void chargeRequestMessageStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, uint64_t amount) override; + const absl::optional& request_names, + uint64_t amount) override; void chargeResponseMessageStat(const Upstream::ClusterInfo& cluster, - const RequestNames& request_names, uint64_t amount) override; + const absl::optional& request_names, + uint64_t amount) override; /** * Resolve the gRPC service and method from the HTTP2 :path header. * @param path supplies the :path header. - * @param service supplies the output pointer of the gRPC service. - * @param method supplies the output pointer of the gRPC method. - * @return bool true if both gRPC serve and method have been resolved successfully. 
+ * @return if both gRPC serve and method have been resolved successfully returns + * a populated RequestStatNames, otherwise returns an empty optional. */ - absl::optional resolveServiceAndMethod(const Http::HeaderEntry* path) override; + absl::optional + resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) override; Stats::StatName successStatName(bool success) const { return success ? success_ : failure_; } Stats::StatName protocolStatName(Protocol protocol) const { @@ -61,6 +64,13 @@ class ContextImpl : public Context { // a lock-free approach to creating dynamic stat-names based on requests. Stats::StatName makeDynamicStatName(absl::string_view name); + // Gets the stat prefix and underlying storage, depending on whether request_names is empty + // or not. + // Prefix will be "" if request_names is empty, or + // ".." if it is not empty. + std::pair + getPrefix(Protocol protocol, const absl::optional& request_names); + Stats::SymbolTable& symbol_table_; mutable Thread::MutexBasicLockable mutex_; Stats::StatNamePool stat_name_pool_ ABSL_GUARDED_BY(mutex_); diff --git a/source/common/grpc/google_grpc_utils.cc b/source/common/grpc/google_grpc_utils.cc index 383f814916a2..395ad33151f2 100644 --- a/source/common/grpc/google_grpc_utils.cc +++ b/source/common/grpc/google_grpc_utils.cc @@ -66,24 +66,17 @@ grpc::ByteBuffer GoogleGrpcUtils::makeByteBuffer(Buffer::InstancePtr&& buffer_in if (!buffer_instance) { return {}; } - Buffer::RawSlice on_raw_slice; - // NB: we need to pass in >= 1 in order to get the real "n" (see Buffer::Instance for details). 
- const int n_slices = buffer_instance->getRawSlices(&on_raw_slice, 1); - if (n_slices <= 0) { + Buffer::RawSliceVector raw_slices = buffer_instance->getRawSlices(); + if (raw_slices.empty()) { return {}; } - auto* container = new BufferInstanceContainer{n_slices, std::move(buffer_instance)}; - if (n_slices == 1) { - grpc::Slice one_slice(on_raw_slice.mem_, on_raw_slice.len_, - &BufferInstanceContainer::derefBufferInstanceContainer, container); - return {&one_slice, 1}; - } - absl::FixedArray many_raw_slices(n_slices); - container->buffer_->getRawSlices(many_raw_slices.begin(), n_slices); + + auto* container = + new BufferInstanceContainer{static_cast(raw_slices.size()), std::move(buffer_instance)}; std::vector slices; - slices.reserve(n_slices); - for (int i = 0; i < n_slices; i++) { - slices.emplace_back(many_raw_slices[i].mem_, many_raw_slices[i].len_, + slices.reserve(raw_slices.size()); + for (Buffer::RawSlice& raw_slice : raw_slices) { + slices.emplace_back(raw_slice.mem_, raw_slice.len_, &BufferInstanceContainer::derefBufferInstanceContainer, container); } return {&slices[0], slices.size()}; diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 193decf64439..3deaff82c305 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -152,6 +152,7 @@ envoy_cc_library( ":date_provider_lib", "//include/envoy/config:config_provider_interface", "//include/envoy/http:filter_interface", + "//include/envoy/http:request_id_extension_interface", "//include/envoy/router:rds_interface", "//source/common/network:utility_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", @@ -220,7 +221,6 @@ envoy_cc_library( "//source/common/http/http3:well_known_names", "//source/common/network:utility_lib", "//source/common/router:config_lib", - "//source/common/runtime:uuid_util_lib", "//source/common/stats:timespan_lib", "//source/common/stream_info:stream_info_lib", "//source/common/tracing:http_tracer_lib", @@ 
-393,3 +393,22 @@ envoy_cc_library( "//source/common/common:logger_lib", ], ) + +envoy_cc_library( + name = "request_id_extension_lib", + srcs = [ + "request_id_extension_impl.cc", + "request_id_extension_uuid_impl.cc", + ], + hdrs = [ + "request_id_extension_impl.h", + "request_id_extension_uuid_impl.h", + ], + deps = [ + "//include/envoy/http:request_id_extension_interface", + "//include/envoy/server:request_id_extension_config_interface", + "//source/common/config:utility_lib", + "//source/common/runtime:runtime_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 42a755c1d125..cc5659da7885 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -267,7 +267,7 @@ void AsyncRequestImpl::onComplete() { response_->trailers(), streamInfo(), Tracing::EgressConfig::get()); - callbacks_.onSuccess(std::move(response_)); + callbacks_.onSuccess(*this, std::move(response_)); } void AsyncRequestImpl::onHeaders(ResponseHeaderMapPtr&& headers, bool) { @@ -302,7 +302,7 @@ void AsyncRequestImpl::onReset() { if (!cancelled_) { // In this case we don't have a valid response so we do need to raise a failure. 
- callbacks_.onFailure(AsyncClient::FailureReason::Reset); + callbacks_.onFailure(*this, AsyncClient::FailureReason::Reset); } } diff --git a/source/common/http/codec_wrappers.h b/source/common/http/codec_wrappers.h index 20ba1ae88cc2..6a4503e53451 100644 --- a/source/common/http/codec_wrappers.h +++ b/source/common/http/codec_wrappers.h @@ -93,6 +93,10 @@ class RequestEncoderWrapper : public RequestEncoder { Stream& getStream() override { return inner_.getStream(); } + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { + return inner_.http1StreamEncoderOptions(); + } + protected: RequestEncoderWrapper(RequestEncoder& inner) : inner_(inner) {} diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 3264e57dccc5..774b5e9f47c5 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -3,6 +3,7 @@ #include "envoy/config/config_provider.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/http/filter.h" +#include "envoy/http/request_id_extension.h" #include "envoy/router/rds.h" #include "envoy/stats/scope.h" #include "envoy/tracing/http_tracer.h" @@ -193,6 +194,11 @@ class ConnectionManagerConfig { virtual ~ConnectionManagerConfig() = default; + /** + * @return RequestIDExtensionSharedPtr The request id utilities instance to use + */ + virtual RequestIDExtensionSharedPtr requestIDExtension() PURE; + /** * @return const std::list& the access logs to write to. */ @@ -412,6 +418,13 @@ class ConnectionManagerConfig { * one. */ virtual bool shouldMergeSlashes() const PURE; + + /** + * @return the action HttpConnectionManager should take when receiving client request + * headers containing underscore characters. 
+ */ + virtual envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 242cd000231e..2b3946187846 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -281,6 +281,10 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod void ConnectionManagerImpl::handleCodecException(const char* error) { ENVOY_CONN_LOG(debug, "dispatch error: {}", read_callbacks_->connection(), error); + read_callbacks_->connection().streamInfo().setResponseCodeDetails( + absl::StrCat("codec error: ", error)); + read_callbacks_->connection().streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::DownstreamProtocolError); // HTTP/1.1 codec has already sent a 400 response if possible. HTTP/2 codec has already sent // GOAWAY. @@ -322,19 +326,6 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool try { codec_->dispatch(data); } catch (const FrameFloodException& e) { - // TODO(mattklein123): This is an emergency substitute for the lack of connection level - // logging in the HCM. In a public follow up change we will add full support for connection - // level logging in the HCM, similar to what we have in tcp_proxy. This will allow abuse - // indicators to be stored in the connection level stream info, and then matched, sampled, - // etc. when logged. 
- const envoy::type::v3::FractionalPercent default_value; // 0 - if (runtime_.snapshot().featureEnabled("http.connection_manager.log_flood_exception", - default_value)) { - ENVOY_CONN_LOG(warn, "downstream HTTP flood from IP '{}': {}", - read_callbacks_->connection(), - read_callbacks_->connection().remoteAddress()->asString(), e.what()); - } - handleCodecException(e.what()); return Network::FilterStatus::StopIteration; } catch (const CodecProtocolException& e) { @@ -362,6 +353,10 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool } } while (redispatch); + if (!read_callbacks_->connection().streamInfo().protocol()) { + read_callbacks_->connection().streamInfo().protocol(codec_->protocol()); + } + return Network::FilterStatus::StopIteration; } @@ -557,6 +552,9 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect connection_manager.config_.scopedRouteConfigProvider() == nullptr)), "Either routeConfigProvider or scopedRouteConfigProvider should be set in " "ConnectionManagerImpl."); + + stream_info_.setRequestIDExtension(connection_manager.config_.requestIDExtension()); + if (connection_manager_.config_.isRoutable() && connection_manager.config_.routeConfigProvider() != nullptr) { route_config_update_requester_ = @@ -904,8 +902,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // Modify the downstream remote address depending on configuration and headers. 
stream_info_.setDownstreamRemoteAddress(ConnectionManagerUtility::mutateRequestHeaders( *request_headers_, connection_manager_.read_callbacks_->connection(), - connection_manager_.config_, *snapped_route_config_, connection_manager_.random_generator_, - connection_manager_.local_info_)); + connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_)); } ASSERT(stream_info_.downstreamRemoteAddress() != nullptr); @@ -1530,7 +1527,9 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the // continuation headers. - ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), EMPTY_STRING); + ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), + connection_manager_.config_.requestIDExtension(), + EMPTY_STRING); // Count both the 1xx and follow-up response code in stats. chargeStats(headers); @@ -1613,6 +1612,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa headers.setReferenceServer(connection_manager_.config_.serverName()); } ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), + connection_manager_.config_.requestIDExtension(), connection_manager_.config_.via()); // See if we want to drain/close the connection. 
Send the go away frame prior to encoding the diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index bb3292dc0b87..10bcc7522bd6 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -393,6 +393,11 @@ class ConnectionManagerImpl : Logger::Loggable, ASSERT(parent_.state_.latest_data_encoding_filter_ == this); callback(*parent_.buffered_response_data_.get()); } + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { + // TODO(mattklein123): At some point we might want to actually wrap this interface but for now + // we give the filter direct access to the encoder options. + return parent_.response_encoder_->http1StreamEncoderOptions(); + } void responseDataTooLarge(); void responseDataDrained(); @@ -609,7 +614,7 @@ class ConnectionManagerImpl : Logger::Loggable, : remote_complete_(false), local_complete_(false), codec_saw_local_complete_(false), saw_connection_close_(false), successful_upgrade_(false), created_filter_chain_(false), is_internally_created_(false), decorated_propagate_(true), has_continue_headers_(false), - is_head_request_(false), decoding_headers_only_(false), encoding_headers_only_(false) {} + is_head_request_(false) {} uint32_t filter_call_state_{0}; // The following 3 members are booleans rather than part of the space-saving bitfield as they @@ -639,10 +644,10 @@ class ConnectionManagerImpl : Logger::Loggable, bool is_head_request_ : 1; // Whether a filter has indicated that the request should be treated as a headers only // request. - bool decoding_headers_only_; + bool decoding_headers_only_{false}; // Whether a filter has indicated that the response should be treated as a headers only // response. - bool encoding_headers_only_; + bool encoding_headers_only_{false}; // Used to track which filter is the latest filter that has received data. 
ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 004ac2453b6e..615bcf06baa5 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -15,7 +15,6 @@ #include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" -#include "common/runtime/uuid_util.h" #include "common/tracing/http_tracer_impl.h" #include "absl/strings/str_cat.h" @@ -44,22 +43,24 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( Network::Connection& connection, const Buffer::Instance& data, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - uint32_t max_request_headers_kb, uint32_t max_request_headers_count) { + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) { if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { - return std::make_unique(connection, callbacks, scope, - http2_options, max_request_headers_kb, - max_request_headers_count); + return std::make_unique( + connection, callbacks, scope, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } else { - return std::make_unique(connection, scope, callbacks, - http1_settings, max_request_headers_kb, - max_request_headers_count); + return std::make_unique( + connection, scope, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } } Network::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequestHeaders( RequestHeaderMap& request_headers, Network::Connection& connection, ConnectionManagerConfig& config, const Router::Config& route_config, - 
Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info) { + const LocalInfo::LocalInfo& local_info) { // If this is a Upgrade request, do not remove the Connection and Upgrade headers, // as we forward them verbatim to the upstream hosts. if (Utility::isUpgrade(request_headers)) { @@ -217,12 +218,11 @@ Network::Address::InstanceConstSharedPtr ConnectionManagerUtility::mutateRequest // Generate x-request-id for all edge requests, or if there is none. if (config.generateRequestId()) { - // TODO(PiotrSikora) PERF: Write UUID directly to the header map. - if ((!config.preserveExternalRequestId() && edge_request) || !request_headers.RequestId()) { - const std::string uuid = random.uuid(); - ASSERT(!uuid.empty()); - request_headers.setRequestId(uuid); - } + auto rid_extension = config.requestIDExtension(); + // Unconditionally set a request ID if we are allowed to override it from + // the edge. Otherwise just ensure it is set. + const bool force_set = !config.preserveExternalRequestId() && edge_request; + rid_extension->set(request_headers, force_set); } mutateXfccRequestHeader(request_headers, connection, config); @@ -234,15 +234,14 @@ void ConnectionManagerUtility::mutateTracingRequestHeader(RequestHeaderMap& requ Runtime::Loader& runtime, ConnectionManagerConfig& config, const Router::Route* route) { - if (!config.tracingConfig() || !request_headers.RequestId()) { + if (!config.tracingConfig()) { return; } - // TODO(dnoe): Migrate uuidModBy and others below to take string_view (#6580) - std::string x_request_id(request_headers.RequestId()->value().getStringView()); + auto rid_extension = config.requestIDExtension(); uint64_t result; - // Skip if x-request-id is corrupted. 
- if (!UuidUtils::uuidModBy(x_request_id, result, 10000)) { + // Skip if request-id is corrupted, or non-existent + if (!rid_extension->modBy(request_headers, result, 10000)) { return; } @@ -260,23 +259,21 @@ void ConnectionManagerUtility::mutateTracingRequestHeader(RequestHeaderMap& requ } // Do not apply tracing transformations if we are currently tracing. - if (UuidTraceStatus::NoTrace == UuidUtils::isTraceableUuid(x_request_id)) { + if (TraceStatus::NoTrace == rid_extension->getTraceStatus(request_headers)) { if (request_headers.ClientTraceId() && runtime.snapshot().featureEnabled("tracing.client_enabled", *client_sampling)) { - UuidUtils::setTraceableUuid(x_request_id, UuidTraceStatus::Client); + rid_extension->setTraceStatus(request_headers, TraceStatus::Client); } else if (request_headers.EnvoyForceTrace()) { - UuidUtils::setTraceableUuid(x_request_id, UuidTraceStatus::Forced); + rid_extension->setTraceStatus(request_headers, TraceStatus::Forced); } else if (runtime.snapshot().featureEnabled("tracing.random_sampling", *random_sampling, result)) { - UuidUtils::setTraceableUuid(x_request_id, UuidTraceStatus::Sampled); + rid_extension->setTraceStatus(request_headers, TraceStatus::Sampled); } } if (!runtime.snapshot().featureEnabled("tracing.global_enabled", *overall_sampling, result)) { - UuidUtils::setTraceableUuid(x_request_id, UuidTraceStatus::NoTrace); + rid_extension->setTraceStatus(request_headers, TraceStatus::NoTrace); } - - request_headers.setRequestId(x_request_id); } void ConnectionManagerUtility::mutateXfccRequestHeader(RequestHeaderMap& request_headers, @@ -365,9 +362,9 @@ void ConnectionManagerUtility::mutateXfccRequestHeader(RequestHeaderMap& request } } -void ConnectionManagerUtility::mutateResponseHeaders(ResponseHeaderMap& response_headers, - const RequestHeaderMap* request_headers, - const std::string& via) { +void ConnectionManagerUtility::mutateResponseHeaders( + ResponseHeaderMap& response_headers, const RequestHeaderMap* request_headers, 
+ const RequestIDExtensionSharedPtr& rid_extension, const std::string& via) { if (request_headers != nullptr && Utility::isUpgrade(*request_headers) && Utility::isUpgrade(response_headers)) { // As in mutateRequestHeaders, Upgrade responses have special handling. @@ -384,11 +381,9 @@ void ConnectionManagerUtility::mutateResponseHeaders(ResponseHeaderMap& response } response_headers.removeTransferEncoding(); - if (request_headers != nullptr && request_headers->EnvoyForceTrace() && - request_headers->RequestId()) { - response_headers.setRequestId(request_headers->RequestId()->value().getStringView()); + if (request_headers != nullptr && request_headers->EnvoyForceTrace()) { + rid_extension->setInResponse(response_headers, *request_headers); } - response_headers.removeKeepAlive(); response_headers.removeProxyConnection(); diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index ebc37a4bda18..20381116162f 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -38,7 +38,9 @@ class ConnectionManagerUtility { ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - uint32_t max_request_headers_kb, uint32_t max_request_headers_count); + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); /** * Mutates request headers in various ways. 
This functionality is broken out because of its @@ -54,10 +56,11 @@ class ConnectionManagerUtility { static Network::Address::InstanceConstSharedPtr mutateRequestHeaders(RequestHeaderMap& request_headers, Network::Connection& connection, ConnectionManagerConfig& config, const Router::Config& route_config, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info); + const LocalInfo::LocalInfo& local_info); static void mutateResponseHeaders(ResponseHeaderMap& response_headers, const RequestHeaderMap* request_headers, + const RequestIDExtensionSharedPtr& rid_extension, const std::string& via); // Sanitize the path in the header map if forced by config. diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index a0e0671e650f..a81434cead9c 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -30,7 +30,7 @@ class ConnPoolImplBase : public ConnectionPool::Instance, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - virtual ~ConnPoolImplBase(); + ~ConnPoolImplBase() override; // Closes and destroys all connections. 
This must be called in the destructor of // derived classes because the derived ActiveClient will downcast parent_ to a more @@ -46,7 +46,7 @@ class ConnPoolImplBase : public ConnectionPool::Instance, public: ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, uint64_t concurrent_request_limit); - virtual ~ActiveClient(); + ~ActiveClient() override; void releaseResources(); diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index a55c44a104cc..9772f3556683 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -143,11 +143,15 @@ bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, const HeaderD return match != header_data.invert_match_; } -bool HeaderUtility::headerIsValid(const absl::string_view header_value) { +bool HeaderUtility::headerValueIsValid(const absl::string_view header_value) { return nghttp2_check_header_value(reinterpret_cast(header_value.data()), header_value.size()) != 0; } +bool HeaderUtility::headerNameContainsUnderscore(const absl::string_view header_name) { + return header_name.find('_') != absl::string_view::npos; +} + bool HeaderUtility::authorityIsValid(const absl::string_view header_value) { return nghttp2_check_authority(reinterpret_cast(header_value.data()), header_value.size()) != 0; diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 10901b0c0be2..e349a2ee97d0 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -94,7 +94,16 @@ class HeaderUtility { * http://tools.ietf.org/html/rfc7230#section-3.2 * @return bool true if the header values are valid, according to the aforementioned RFC. */ - static bool headerIsValid(const absl::string_view header_value); + static bool headerValueIsValid(const absl::string_view header_value); + + /** + * Checks if header name contains underscore characters. 
+ * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by + * default allows headers with underscore characters. + * @return bool true if header name contains underscore characters. + */ + static bool headerNameContainsUnderscore(const absl::string_view header_name); /** * Validates that the characters in the authority are valid. diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 3dc482fb96d8..2698501edff8 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -133,7 +133,6 @@ class HeaderValues { const LowerCaseString KeepAlive{"keep-alive"}; const LowerCaseString Location{"location"}; const LowerCaseString Method{":method"}; - const LowerCaseString NoChunks{":no-chunks"}; // Illegal pseudo-header used internally. const LowerCaseString Origin{"origin"}; const LowerCaseString OtSpanContext{"x-ot-span-context"}; const LowerCaseString Path{":path"}; diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 4bb878b024ec..5ec7f16ab906 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -39,6 +39,7 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/common/http/http1:header_formatter_lib", "//source/common/runtime:runtime_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 309cdab5e70f..85efa2c11102 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -65,9 +65,9 @@ const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) - : connection_(connection), chunk_encoding_(true), processing_100_continue_(false), - is_response_to_head_request_(false), 
is_content_length_allowed_(true), - header_key_formatter_(header_key_formatter) { + : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), + processing_100_continue_(false), is_response_to_head_request_(false), + is_content_length_allowed_(true), header_key_formatter_(header_key_formatter) { if (connection_.connection().aboveHighWatermark()) { runHighWatermarkCallbacks(); } @@ -138,14 +138,14 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head // response. Upper layers generally should strip transfer-encoding since it only applies to // HTTP/1.1. The codec will infer it based on the type of response. // for streaming (e.g. SSE stream sent to hystrix dashboard), we do not want - // chunk transfer encoding but we don't have a content-length so we pass "envoy only" - // header to avoid adding chunks + // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is + // consulted before enabling chunk encoding. // // Note that for HEAD requests Envoy does best-effort guessing when there is no // content-length. If a client makes a HEAD request for an upstream resource // with no bytes but the upstream response doesn't include "Content-length: 0", // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded. - if (saw_content_length || headers.NoChunks()) { + if (saw_content_length || disable_chunk_encoding_) { chunk_encoding_ = false; } else { if (processing_100_continue_) { @@ -267,9 +267,11 @@ void ServerConnectionImpl::doFloodProtectionChecks() const { if (!flood_protection_) { return; } - // Before sending another response, make sure it won't exceed flood protection thresholds. + // Before processing another request, make sure that we are below the response flood protection + // threshold. 
if (outbound_responses_ >= max_outbound_responses_) { - ENVOY_CONN_LOG(trace, "error sending response: Too many pending responses queued", connection_); + ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", + connection_); stats_.response_flood_.inc(); throw FrameFloodException("Too many responses queued."); } @@ -311,9 +313,6 @@ static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) { - // Do flood checks before attempting to write any responses. - flood_checks_(); - started_response_ = true; // The contract is that client codecs must ensure that :status is present. @@ -388,15 +387,23 @@ http_parser_settings ConnectionImpl::settings_{ return static_cast(parser->data)->onHeadersCompleteBase(); }, [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onBody(at, length); + static_cast(parser->data)->bufferBody(at, length); return 0; }, [](http_parser* parser) -> int { static_cast(parser->data)->onMessageCompleteBase(); return 0; }, - nullptr, // on_chunk_header - nullptr // on_chunk_complete + [](http_parser* parser) -> int { + // A 0-byte chunk header is used to signal the end of the chunked body. + // When this function is called, http-parser holds the size of the chunk in + // parser->content_length. 
See + // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336 + const bool is_final_chunk = (parser->content_length == 0); + static_cast(parser->data)->onChunkHeader(is_final_chunk); + return 0; + }, + nullptr // on_chunk_complete }; ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, @@ -411,6 +418,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st connection_header_sanitization_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.connection_header_sanitization")), enable_trailers_(enable_trailers), + reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.reject_unsupported_transfer_encodings")), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, [&]() -> void { this->onAboveHighWatermark(); }), max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { @@ -423,6 +432,7 @@ void ConnectionImpl::completeLastHeader() { ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); + checkHeaderNameForUnderscores(); auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); @@ -450,21 +460,15 @@ bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { return false; } - ssize_t total_parsed = 0; - uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { - total_parsed += slice.len_; - onBody(static_cast(slice.mem_), slice.len_); - } - ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, total_parsed); - data.drain(total_parsed); + ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, data.length()); + onBody(data); + data.drain(data.length()); 
return true; } void ConnectionImpl::dispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + ASSERT(buffered_body_.length() == 0); if (maybeDirectDispatch(data)) { return; @@ -475,15 +479,20 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { ssize_t total_parsed = 0; if (data.length() > 0) { - uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { + // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at + // this point. + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + break; + } } + dispatchBufferedBody(); } else { dispatchSlice(nullptr, 0); } + ASSERT(buffered_body_.length() == 0); ENVOY_CONN_LOG(trace, "parsed {} bytes", connection_, total_parsed); data.drain(total_parsed); @@ -537,18 +546,12 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { const absl::string_view header_value = StringUtil::trim(absl::string_view(data, length)); if (strict_header_validation_) { - if (!Http::HeaderUtility::headerIsValid(header_value)) { + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } - } else if (header_value.find('\0') != absl::string_view::npos) { - // http-parser should filter for this - // (https://tools.ietf.org/html/rfc7230#section-3.2.6), but it doesn't today. 
HeaderStrings - // have an invariant that they must not contain embedded zero characters - // (NUL, ASCII 0x0). - throw CodecProtocolException("http/1.1 protocol error: header value contains NUL"); } header_parsing_state_ = HeaderParsingState::Value; @@ -605,11 +608,9 @@ int ConnectionImpl::onHeadersCompleteBase() { // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject // transfer-codings it does not understand. if (request_or_response_headers.TransferEncoding()) { - absl::string_view encoding = + const absl::string_view encoding = request_or_response_headers.TransferEncoding()->value().getStringView(); - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.reject_unsupported_transfer_encodings") && - !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Identity) && + if (reject_unsupported_transfer_encodings_ && !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) { error_code_ = Http::Code::NotImplemented; sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); @@ -624,9 +625,31 @@ int ConnectionImpl::onHeadersCompleteBase() { return handling_upgrade_ ? 2 : rc; } +void ConnectionImpl::bufferBody(const char* data, size_t length) { + buffered_body_.add(data, length); +} + +void ConnectionImpl::dispatchBufferedBody() { + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + if (buffered_body_.length() > 0) { + onBody(buffered_body_); + buffered_body_.drain(buffered_body_.length()); + } +} + +void ConnectionImpl::onChunkHeader(bool is_final_chunk) { + if (is_final_chunk) { + // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found + // while processing trailers. 
+ dispatchBufferedBody(); + } +} + void ConnectionImpl::onMessageCompleteBase() { ENVOY_CONN_LOG(trace, "message complete", connection_); + dispatchBufferedBody(); + if (handling_upgrade_) { // If this is an upgrade request, swallow the onMessageComplete. The // upgrade payload will be treated as stream body. @@ -663,11 +686,12 @@ void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { onResetStream(reason); } -ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, Stats::Scope& stats, - ServerConnectionCallbacks& callbacks, - const Http1Settings& settings, - uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count) +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, Stats::Scope& stats, ServerConnectionCallbacks& callbacks, + const Http1Settings& settings, uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, max_request_headers_count, formatter(settings), settings.enable_trailers_), callbacks_(callbacks), codec_settings_(settings), @@ -681,7 +705,8 @@ ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, Stat max_outbound_responses_( Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), flood_protection_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")) {} + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), + headers_with_underscores_action_(headers_with_underscores_action) {} void ServerConnectionImpl::onEncodeComplete() { if (active_request_.value().remote_complete_) { @@ -803,9 +828,14 @@ int ServerConnectionImpl::onHeadersComplete() { void ServerConnectionImpl::onMessageBegin() { if (!resetStreamCalled()) { ASSERT(!active_request_.has_value()); - 
active_request_.emplace(*this, header_key_formatter_.get(), flood_checks_); + active_request_.emplace(*this, header_key_formatter_.get()); auto& active_request = active_request_.value(); active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); + + // Check for pipelined request flood as we prepare to accept a new request. + // Parse errors that happen prior to onMessageBegin result in stream termination, it is not + // possible to overflow output buffers with early parse errors. + doFloodProtectionChecks(); } } @@ -815,12 +845,11 @@ void ServerConnectionImpl::onUrl(const char* data, size_t length) { } } -void ServerConnectionImpl::onBody(const char* data, size_t length) { +void ServerConnectionImpl::onBody(Buffer::Instance& data) { ASSERT(!deferred_end_stream_headers_); if (active_request_.has_value()) { - ENVOY_CONN_LOG(trace, "body size={}", connection_, length); - Buffer::OwnedImpl buffer(data, length); - active_request_.value().request_decoder_->decodeData(buffer, false); + ENVOY_CONN_LOG(trace, "body size={}", connection_, data.length()); + active_request_.value().request_decoder_->decodeData(data, false); } } @@ -891,6 +920,27 @@ void ServerConnectionImpl::releaseOutboundResponse( delete fragment; } +void ServerConnectionImpl::checkHeaderNameForUnderscores() { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + current_header_field_.getStringView()); + stats_.dropped_headers_with_underscores_.inc(); + current_header_field_.clear(); + current_header_value_.clear(); + } else { + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", + connection_, 
current_header_field_.getStringView()); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); + } + } +} + ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, ConnectionCallbacks&, const Http1Settings& settings, const uint32_t max_response_headers_count) @@ -901,6 +951,7 @@ bool ClientConnectionImpl::cannotHaveBody() { if ((pending_response_.has_value() && pending_response_.value().encoder_.headRequest()) || parser_.status_code == 204 || parser_.status_code == 304 || (parser_.status_code >= 200 && parser_.content_length == 0)) { + ASSERT(!pending_response_done_); return true; } else { return false; @@ -917,7 +968,9 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode ASSERT(connection_.readEnabled()); ASSERT(!pending_response_.has_value()); + ASSERT(pending_response_done_); pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder); + pending_response_done_ = false; return pending_response_.value().encoder_; } @@ -928,6 +981,7 @@ int ClientConnectionImpl::onHeadersComplete() { if (!pending_response_.has_value() && !resetStreamCalled()) { throw PrematureResponseException(static_cast(parser_.status_code)); } else if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); auto& headers = absl::get(headers_or_trailers_); ENVOY_CONN_LOG(trace, "Client: onHeadersComplete size={}", connection_, headers->size()); headers->setStatus(parser_.status_code); @@ -953,12 +1007,11 @@ int ClientConnectionImpl::onHeadersComplete() { return cannotHaveBody() ? 
1 : 0; } -void ClientConnectionImpl::onBody(const char* data, size_t length) { +void ClientConnectionImpl::onBody(Buffer::Instance& data) { ASSERT(!deferred_end_stream_headers_); if (pending_response_.has_value()) { - Buffer::OwnedImpl buffer; - buffer.add(data, length); - pending_response_.value().decoder_->decodeData(buffer, false); + ASSERT(!pending_response_done_); + pending_response_.value().decoder_->decodeData(data, false); } } @@ -969,9 +1022,12 @@ void ClientConnectionImpl::onMessageComplete() { return; } if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); // After calling decodeData() with end stream set to true, we should no longer be able to reset. - PendingResponse response = std::move(pending_response_.value()); - pending_response_.reset(); + PendingResponse& response = pending_response_.value(); + // Encoder is used as part of decode* calls later in this function so pending_response_ can not + // be reset just yet. Preserve the state in pending_response_done_ instead. + pending_response_done_ = true; // Streams are responsible for unwinding any outstanding readDisable(true) // calls done on the underlying connection as they are destroyed. As this is @@ -997,20 +1053,23 @@ void ClientConnectionImpl::onMessageComplete() { } // Reset to ensure no information from one requests persists to the next. + pending_response_.reset(); headers_or_trailers_.emplace(nullptr); } } void ClientConnectionImpl::onResetStream(StreamResetReason reason) { // Only raise reset if we did not already dispatch a complete response. 
- if (pending_response_.has_value()) { + if (pending_response_.has_value() && !pending_response_done_) { pending_response_.value().encoder_.runResetCallbacks(reason); + pending_response_done_ = true; pending_response_.reset(); } } void ClientConnectionImpl::sendProtocolError(absl::string_view details) { if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); pending_response_.value().encoder_.setDetails(details); } } @@ -1021,10 +1080,9 @@ void ClientConnectionImpl::onAboveHighWatermark() { } void ClientConnectionImpl::onBelowLowWatermark() { - // This can get called without an active stream/request when upstream decides to do bad things - // such as sending multiple responses to the same request, causing us to close the connection, but - // in doing so go below low watermark. - if (pending_response_.has_value()) { + // This can get called without an active stream/request when the response completion causes us to + // close the connection, but in doing so go below low watermark. + if (pending_response_.has_value() && !pending_response_done_) { pending_response_.value().encoder_.runLowWatermarkCallbacks(); } } diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index c613a8221ca1..cdc651c19766 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -8,6 +8,7 @@ #include #include +#include "envoy/config/core/v3/protocol.pb.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" #include "envoy/stats/scope.h" @@ -27,7 +28,9 @@ namespace Http1 { * All stats for the HTTP/1 codec. 
@see stats_macros.h */ #define ALL_HTTP1_CODEC_STATS(COUNTER) \ + COUNTER(dropped_headers_with_underscores) \ COUNTER(metadata_not_supported_error) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ COUNTER(response_flood) /** @@ -45,16 +48,23 @@ class ConnectionImpl; class StreamEncoderImpl : public virtual StreamEncoder, public Stream, Logger::Loggable, - public StreamCallbackHelper { + public StreamCallbackHelper, + public Http1StreamEncoderOptions { public: // Http::StreamEncoder void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeMetadata(const MetadataMapVector&) override; Stream& getStream() override { return *this; } + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; } + + // Http::Http1StreamEncoderOptions + void disableChunkEncoding() override { disable_chunk_encoding_ = true; } // Http::Stream void addCallbacks(StreamCallbacks& callbacks) override { addCallbacks_(callbacks); } void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacks_(callbacks); } + // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further + // progress may be made with the codec. 
void resetStream(StreamResetReason reason) override; void readDisable(bool disable) override; uint32_t bufferLimit() override; @@ -74,6 +84,7 @@ class StreamEncoderImpl : public virtual StreamEncoder, static const std::string LAST_CHUNK; ConnectionImpl& connection_; + bool disable_chunk_encoding_ : 1; bool chunk_encoding_ : 1; bool processing_100_continue_ : 1; bool is_response_to_head_request_ : 1; @@ -112,11 +123,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, */ class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { public: - using FloodChecks = std::function; - - ResponseEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter, - FloodChecks& flood_checks) - : StreamEncoderImpl(connection, header_key_formatter), flood_checks_(flood_checks) {} + ResponseEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} bool startedResponse() { return started_response_; } @@ -126,7 +134,6 @@ class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); } private: - FloodChecks& flood_checks_; bool started_response_{}; }; @@ -212,6 +219,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable; ServerConnectionImpl(Network::Connection& connection, Stats::Scope& stats, ServerConnectionCallbacks& callbacks, const Http1Settings& settings, - uint32_t max_request_headers_kb, const uint32_t max_request_headers_count); + uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); bool supports_http_10() override { return codec_settings_.accept_http_10_; } @@ -343,9 +396,8 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { * An active HTTP/1.1 
request. */ struct ActiveRequest { - ActiveRequest(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter, - FloodChecks& flood_checks) - : response_encoder_(connection, header_key_formatter, flood_checks) {} + ActiveRequest(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) + : response_encoder_(connection, header_key_formatter) {} HeaderString request_url_; RequestDecoder* request_decoder_{}; @@ -368,7 +420,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { void onMessageBegin() override; void onUrl(const char* data, size_t length) override; int onHeadersComplete() override; - void onBody(const char* data, size_t length) override; + void onBody(Buffer::Instance& data) override; void onMessageComplete() override; void onResetStream(StreamResetReason reason) override; void sendProtocolError(absl::string_view details) override; @@ -398,9 +450,9 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; void doFloodProtectionChecks() const; + void checkHeaderNameForUnderscores() override; ServerConnectionCallbacks& callbacks_; - std::function flood_checks_{[&]() { this->doFloodProtectionChecks(); }}; absl::optional active_request_; Http1Settings codec_settings_; const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_; @@ -416,6 +468,9 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // trailers are enabled). The variant is reset to null headers on message complete for assertion // purposes. absl::variant headers_or_trailers_; + // The action to take when a request header name contains underscore characters. 
+ const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; }; /** @@ -447,7 +502,7 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { void onMessageBegin() override {} void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } int onHeadersComplete() override; - void onBody(const char* data, size_t length) override; + void onBody(Buffer::Instance& data) override; void onMessageComplete() override; void onResetStream(StreamResetReason reason) override; void sendProtocolError(absl::string_view details) override; @@ -476,6 +531,12 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { } absl::optional pending_response_; + // TODO(mattklein123): The following bool tracks whether a pending response is complete before + // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks + // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once + // the response is complete. The existence of this variable is hard to reason about and it should + // be combined with pending_response_ somehow in a follow up cleanup. + bool pending_response_done_{true}; // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. 
bool ignore_message_complete_for_100_continue_{}; // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index a6e03c97083b..ffa5191a2675 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -40,9 +40,11 @@ envoy_cc_library( "//source/common/http:codes_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", "//source/common/runtime:runtime_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index cd3235d0c611..2d54b0b1a413 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -17,8 +17,10 @@ #include "common/common/utility.h" #include "common/http/codes.h" #include "common/http/exception.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/utility.h" +#include "common/runtime/runtime_impl.h" #include "absl/container/fixed_array.h" @@ -451,10 +453,7 @@ ConnectionImpl::~ConnectionImpl() { nghttp2_session_del(session_); } void ConnectionImpl::dispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); - uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { dispatching_ = true; ssize_t rc = nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); @@ -850,6 +849,14 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, stats_.headers_cb_no_stream_.inc(); return 0; } + + auto should_return = 
checkHeaderNameForUnderscores(name.getStringView()); + if (should_return) { + name.clear(); + value.clear(); + return should_return.value(); + } + stream->saveHeader(std::move(name), std::move(value)); if (stream->headers().byteSize() > max_headers_kb_ * 1024 || @@ -930,7 +937,7 @@ void ConnectionImpl::sendSettings( {static_cast(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 0U : 1U}); } - for (const auto it : http2_options.custom_settings_parameters()) { + for (const auto& it : http2_options.custom_settings_parameters()) { ASSERT(it.identifier().value() <= std::numeric_limits::max()); const bool result = insertParameter({static_cast(it.identifier().value()), it.value().value()}); @@ -1173,11 +1180,14 @@ int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na ServerConnectionImpl::ServerConnectionImpl( Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count) + const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : ConnectionImpl(connection, scope, http2_options, max_request_headers_kb, max_request_headers_count), - callbacks_(callbacks) { + callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { Http2Options h2_options(http2_options); + nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(), h2_options.options()); sendSettings(http2_options, false); @@ -1322,6 +1332,25 @@ void ServerConnectionImpl::dispatch(Buffer::Instance& data) { ConnectionImpl::dispatch(data); } +absl::optional +ServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && 
+ Http::HeaderUtility::headerNameContainsUnderscore(header_name)) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + header_name); + stats_.dropped_headers_with_underscores_.inc(); + return 0; + } + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, + header_name); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + return absl::nullopt; +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index e469539f40aa..512f0e65aa73 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -7,6 +7,7 @@ #include #include +#include "envoy/config/core/v3/protocol.pb.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" @@ -40,6 +41,7 @@ const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; * All stats for the HTTP/2 codec. 
@see stats_macros.h */ #define ALL_HTTP2_CODEC_STATS(COUNTER) \ + COUNTER(dropped_headers_with_underscores) \ COUNTER(header_overflow) \ COUNTER(headers_cb_no_stream) \ COUNTER(inbound_empty_frames_flood) \ @@ -47,6 +49,7 @@ const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; COUNTER(inbound_window_update_frames_flood) \ COUNTER(outbound_control_flood) \ COUNTER(outbound_flood) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ COUNTER(rx_messaging_error) \ COUNTER(rx_reset) \ COUNTER(too_many_header_frames) \ @@ -168,6 +171,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view /* header_name */) { + return absl::nullopt; + } + static Http2Callbacks http2_callbacks_; std::list active_streams_; @@ -489,7 +505,9 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count); + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); private: // ConnectionImpl @@ -499,6 +517,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { void checkOutboundQueueLimits() override; bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override; bool checkInboundFrameLimits() override; + absl::optional checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection // The reason for overriding the dispatch method is to do flood mitigation only when @@ -514,6 +533,10 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // This flag indicates that downstream data is being dispatched and turns on flood mitigation // in the checkMaxOutbound*Framed methods. 
bool dispatching_downstream_data_{false}; + + // The action to take when a request header name contains underscore characters. + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; }; } // namespace Http2 diff --git a/source/common/http/http2/metadata_decoder.cc b/source/common/http/http2/metadata_decoder.cc index ca9c4cc40198..836bc8dbbe36 100644 --- a/source/common/http/http2/metadata_decoder.cc +++ b/source/common/http/http2/metadata_decoder.cc @@ -40,10 +40,8 @@ bool MetadataDecoder::onMetadataFrameComplete(bool end_metadata) { } bool MetadataDecoder::decodeMetadataPayloadUsingNghttp2(bool end_metadata) { - // Computes how many slices are needed to get all the data out. - const int num_slices = payload_.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - payload_.getRawSlices(slices.begin(), num_slices); + Buffer::RawSliceVector slices = payload_.getRawSlices(); + const int num_slices = slices.size(); // Data consumed by nghttp2 so far. ssize_t payload_size_consumed = 0; diff --git a/source/common/http/http3/quic_codec_factory.h b/source/common/http/http3/quic_codec_factory.h index 8bd5dc0b4ce0..0b4a72404200 100644 --- a/source/common/http/http3/quic_codec_factory.h +++ b/source/common/http/http3/quic_codec_factory.h @@ -12,7 +12,7 @@ namespace Http { // A factory to create Http::ServerConnection instance for QUIC. class QuicHttpServerConnectionFactory : public Config::UntypedFactory { public: - virtual ~QuicHttpServerConnectionFactory() {} + ~QuicHttpServerConnectionFactory() override = default; virtual std::unique_ptr createQuicServerConnection(Network::Connection& connection, ConnectionCallbacks& callbacks) PURE; @@ -23,7 +23,7 @@ class QuicHttpServerConnectionFactory : public Config::UntypedFactory { // A factory to create Http::ClientConnection instance for QUIC. 
class QuicHttpClientConnectionFactory : public Config::UntypedFactory { public: - virtual ~QuicHttpClientConnectionFactory() {} + ~QuicHttpClientConnectionFactory() override = default; virtual std::unique_ptr createQuicClientConnection(Network::Connection& connection, ConnectionCallbacks& callbacks) PURE; diff --git a/source/common/http/message_impl.h b/source/common/http/message_impl.h index ce33936dbd68..a42a0a2afd5d 100644 --- a/source/common/http/message_impl.h +++ b/source/common/http/message_impl.h @@ -30,16 +30,11 @@ class MessageImpl : public Message trailers_ = std::move(trailers); } std::string bodyAsString() const override { - std::string ret; if (body_) { - uint64_t num_slices = body_->getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - body_->getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { - ret.append(reinterpret_cast(slice.mem_), slice.len_); - } + return body_->toString(); + } else { + return ""; } - return ret; } private: diff --git a/source/common/http/request_id_extension_impl.cc b/source/common/http/request_id_extension_impl.cc new file mode 100644 index 000000000000..ed4022712fc0 --- /dev/null +++ b/source/common/http/request_id_extension_impl.cc @@ -0,0 +1,53 @@ +#include "common/http/request_id_extension_impl.h" + +#include "common/common/utility.h" +#include "common/config/utility.h" +#include "common/http/request_id_extension_uuid_impl.h" + +namespace Envoy { +namespace Http { + +namespace { + +// NoopRequestIDExtension is the implementation used outside of HTTP context. 
+class NoopRequestIDExtension : public RequestIDExtension { +public: + void set(RequestHeaderMap&, bool) override {} + void setInResponse(ResponseHeaderMap&, const RequestHeaderMap&) override {} + bool modBy(const RequestHeaderMap&, uint64_t&, uint64_t) override { return false; } + TraceStatus getTraceStatus(const RequestHeaderMap&) override { return TraceStatus::NoTrace; } + void setTraceStatus(RequestHeaderMap&, TraceStatus) override {} +}; + +} // namespace + +RequestIDExtensionSharedPtr RequestIDExtensionFactory::fromProto( + const envoy::extensions::filters::network::http_connection_manager::v3::RequestIDExtension& + config, + Server::Configuration::FactoryContext& context) { + const std::string type{TypeUtil::typeUrlToDescriptorFullName(config.typed_config().type_url())}; + auto* factory = + Registry::FactoryRegistry::getFactoryByType( + type); + if (factory == nullptr) { + throw EnvoyException( + fmt::format("Didn't find a registered implementation for type: '{}'", type)); + } + + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + config.typed_config(), context.messageValidationVisitor(), *factory); + return factory->createExtensionInstance(*message, context); +} + +RequestIDExtensionSharedPtr +RequestIDExtensionFactory::defaultInstance(Envoy::Runtime::RandomGenerator& random) { + return std::make_shared(random); +} + +RequestIDExtensionSharedPtr RequestIDExtensionFactory::noopInstance() { + MUTABLE_CONSTRUCT_ON_FIRST_USE(std::shared_ptr, + std::make_shared()); +} + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/request_id_extension_impl.h b/source/common/http/request_id_extension_impl.h new file mode 100644 index 000000000000..b77ad9b8142b --- /dev/null +++ b/source/common/http/request_id_extension_impl.h @@ -0,0 +1,36 @@ +#pragma once + +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/request_id_extension.h" +#include 
"envoy/server/request_id_extension_config.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Http { +/** + * Request ID Utilities factory that reads the configuration from proto. + */ +class RequestIDExtensionFactory { +public: + /** + * Return a newly created instance of the default RequestIDExtension implementation. + */ + static RequestIDExtensionSharedPtr defaultInstance(Envoy::Runtime::RandomGenerator& random); + + /** + * Return a globally shared instance of the noop RequestIDExtension implementation. + */ + static RequestIDExtensionSharedPtr noopInstance(); + + /** + * Read a RequestIDExtension definition from proto and create it. + */ + static RequestIDExtensionSharedPtr fromProto( + const envoy::extensions::filters::network::http_connection_manager::v3::RequestIDExtension& + config, + Server::Configuration::FactoryContext& context); +}; + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/request_id_extension_uuid_impl.cc b/source/common/http/request_id_extension_uuid_impl.cc new file mode 100644 index 000000000000..dc95b46f81c7 --- /dev/null +++ b/source/common/http/request_id_extension_uuid_impl.cc @@ -0,0 +1,102 @@ +#include "common/http/request_id_extension_uuid_impl.h" + +#include +#include + +#include "envoy/http/header_map.h" + +#include "common/common/utility.h" +#include "common/runtime/runtime_impl.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Http { + +void UUIDRequestIDExtension::set(RequestHeaderMap& request_headers, bool force) { + if (!force && request_headers.RequestId()) { + return; + } + + // TODO(PiotrSikora) PERF: Write UUID directly to the header map. 
+ std::string uuid = random_.uuid(); + ASSERT(!uuid.empty()); + request_headers.setRequestId(uuid); +} + +void UUIDRequestIDExtension::setInResponse(ResponseHeaderMap& response_headers, + const RequestHeaderMap& request_headers) { + if (request_headers.RequestId()) { + response_headers.setRequestId(request_headers.RequestId()->value().getStringView()); + } +} + +bool UUIDRequestIDExtension::modBy(const RequestHeaderMap& request_headers, uint64_t& out, + uint64_t mod) { + if (request_headers.RequestId() == nullptr) { + return false; + } + const std::string uuid(request_headers.RequestId()->value().getStringView()); + if (uuid.length() < 8) { + return false; + } + + uint64_t value; + if (!StringUtil::atoull(uuid.substr(0, 8).c_str(), value, 16)) { + return false; + } + + out = value % mod; + return true; +} + +TraceStatus UUIDRequestIDExtension::getTraceStatus(const RequestHeaderMap& request_headers) { + if (request_headers.RequestId() == nullptr) { + return TraceStatus::NoTrace; + } + absl::string_view uuid = request_headers.RequestId()->value().getStringView(); + if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { + return TraceStatus::NoTrace; + } + + switch (uuid[TRACE_BYTE_POSITION]) { + case TRACE_FORCED: + return TraceStatus::Forced; + case TRACE_SAMPLED: + return TraceStatus::Sampled; + case TRACE_CLIENT: + return TraceStatus::Client; + default: + return TraceStatus::NoTrace; + } +} + +void UUIDRequestIDExtension::setTraceStatus(RequestHeaderMap& request_headers, TraceStatus status) { + if (request_headers.RequestId() == nullptr) { + return; + } + absl::string_view uuid_view = request_headers.RequestId()->value().getStringView(); + if (uuid_view.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { + return; + } + std::string uuid(uuid_view); + + switch (status) { + case TraceStatus::Forced: + uuid[TRACE_BYTE_POSITION] = TRACE_FORCED; + break; + case TraceStatus::Client: + uuid[TRACE_BYTE_POSITION] = TRACE_CLIENT; + break; + case 
TraceStatus::Sampled: + uuid[TRACE_BYTE_POSITION] = TRACE_SAMPLED; + break; + case TraceStatus::NoTrace: + uuid[TRACE_BYTE_POSITION] = NO_TRACE; + break; + } + request_headers.setRequestId(uuid); +} + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/request_id_extension_uuid_impl.h b/source/common/http/request_id_extension_uuid_impl.h new file mode 100644 index 000000000000..c3a660e3a2bc --- /dev/null +++ b/source/common/http/request_id_extension_uuid_impl.h @@ -0,0 +1,47 @@ +#pragma once + +#include "envoy/http/request_id_extension.h" + +#include "common/runtime/runtime_impl.h" + +namespace Envoy { +namespace Http { + +// UUIDRequestIDExtension is the default implementation if no other extension is explicitly +// configured. +class UUIDRequestIDExtension : public RequestIDExtension { +public: + explicit UUIDRequestIDExtension(Envoy::Runtime::RandomGenerator& random) : random_(random) {} + + void set(RequestHeaderMap& request_headers, bool force) override; + void setInResponse(ResponseHeaderMap& response_headers, + const RequestHeaderMap& request_headers) override; + bool modBy(const RequestHeaderMap& request_headers, uint64_t& out, uint64_t mod) override; + TraceStatus getTraceStatus(const RequestHeaderMap& request_headers) override; + void setTraceStatus(RequestHeaderMap& request_headers, TraceStatus status) override; + +private: + // Reference to the random generator used to generate new request IDs + Envoy::Runtime::RandomGenerator& random_; + + // Byte on this position has predefined value of 4 for UUID4. + static const int TRACE_BYTE_POSITION = 14; + + // Value of '9' is chosen randomly to distinguish between freshly generated uuid4 and the + // one modified because we sample trace. + static const char TRACE_SAMPLED = '9'; + + // Value of 'a' is chosen randomly to distinguish between freshly generated uuid4 and the + // one modified because we force trace. 
+ static const char TRACE_FORCED = 'a'; + + // Value of 'b' is chosen randomly to distinguish between freshly generated uuid4 and the + // one modified because of client trace. + static const char TRACE_CLIENT = 'b'; + + // Initial value for freshly generated uuid4. + static const char NO_TRACE = '4'; +}; + +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/rest_api_fetcher.cc b/source/common/http/rest_api_fetcher.cc index 15444aeb81dc..612fff3708a3 100644 --- a/source/common/http/rest_api_fetcher.cc +++ b/source/common/http/rest_api_fetcher.cc @@ -28,13 +28,14 @@ RestApiFetcher::~RestApiFetcher() { void RestApiFetcher::initialize() { refresh(); } -void RestApiFetcher::onSuccess(Http::ResponseMessagePtr&& response) { +void RestApiFetcher::onSuccess(const Http::AsyncClient::Request& request, + Http::ResponseMessagePtr&& response) { uint64_t response_code = Http::Utility::getResponseStatus(response->headers()); if (response_code == enumToInt(Http::Code::NotModified)) { requestComplete(); return; } else if (response_code != enumToInt(Http::Code::OK)) { - onFailure(Http::AsyncClient::FailureReason::Reset); + onFailure(request, Http::AsyncClient::FailureReason::Reset); return; } @@ -47,7 +48,8 @@ void RestApiFetcher::onSuccess(Http::ResponseMessagePtr&& response) { requestComplete(); } -void RestApiFetcher::onFailure(Http::AsyncClient::FailureReason reason) { +void RestApiFetcher::onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) { // Currently Http::AsyncClient::FailureReason only has one value: "Reset". 
ASSERT(reason == Http::AsyncClient::FailureReason::Reset); onFetchFailure(Config::ConfigUpdateFailureReason::ConnectionFailure, nullptr); diff --git a/source/common/http/rest_api_fetcher.h b/source/common/http/rest_api_fetcher.h index 0be0f53a2792..f7dfa76dcde3 100644 --- a/source/common/http/rest_api_fetcher.h +++ b/source/common/http/rest_api_fetcher.h @@ -62,8 +62,9 @@ class RestApiFetcher : public Http::AsyncClient::Callbacks { void requestComplete(); // Http::AsyncClient::Callbacks - void onSuccess(Http::ResponseMessagePtr&& response) override; - void onFailure(Http::AsyncClient::FailureReason reason) override; + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override; Runtime::RandomGenerator& random_; const std::chrono::milliseconds refresh_interval_; diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index ea4aa41cd4e8..fe9393574a32 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -43,7 +43,7 @@ void validateCustomSettingsParameters( std::unordered_set custom_parameters; // User defined and named parameters with the same SETTINGS identifier can not both be set. - for (const auto it : options.custom_settings_parameters()) { + for (const auto& it : options.custom_settings_parameters()) { ASSERT(it.identifier().value() <= std::numeric_limits::max()); // Check for custom parameter inconsistencies. 
const auto result = custom_parameters.insert( @@ -552,7 +552,7 @@ bool Utility::sanitizeConnectionHeader(Http::RequestHeaderMap& headers) { bool keep_header = false; // Determine whether the nominated header contains invalid values - const HeaderEntry* nominated_header = NULL; + const HeaderEntry* nominated_header = nullptr; if (lcs_header_to_remove == Http::Headers::get().Connection) { // Remove the connection header from the nominated tokens if it's self nominated diff --git a/source/common/network/BUILD b/source/common/network/BUILD index e8e7e9c6b68f..cecff719b046 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -343,3 +343,19 @@ envoy_cc_library( "//source/common/common:macros", ], ) + +envoy_cc_library( + name = "filter_matcher_lib", + srcs = [ + "filter_matcher.cc", + ], + hdrs = ["filter_matcher.h"], + external_deps = [ + "abseil_str_format", + ], + deps = [ + "//include/envoy/network:filter_interface", + "//include/envoy/network:listen_socket_interface", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index a79f7506ee84..fed4bc8aa320 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -43,10 +43,11 @@ void ConnectionImplUtility::updateBufferStats(uint64_t delta, uint64_t new_total std::atomic ConnectionImpl::next_global_id_; ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket, - TransportSocketPtr&& transport_socket, bool connected) + TransportSocketPtr&& transport_socket, + StreamInfo::StreamInfo& stream_info, bool connected) : ConnectionImplBase(dispatcher, next_global_id_++), transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), - stream_info_(dispatcher.timeSource()), filter_manager_(*this), + stream_info_(stream_info), filter_manager_(*this), write_buffer_( 
dispatcher.getWatermarkFactory().create([this]() -> void { this->onLowWatermark(); }, [this]() -> void { this->onHighWatermark(); })), @@ -685,7 +686,8 @@ ClientConnectionImpl::ClientConnectionImpl( Network::TransportSocketPtr&& transport_socket, const Network::ConnectionSocket::OptionsSharedPtr& options) : ConnectionImpl(dispatcher, std::make_unique(remote_address, options), - std::move(transport_socket), false) { + std::move(transport_socket), stream_info_, false), + stream_info_(dispatcher.timeSource()) { // There are no meaningful socket options or source address semantics for // non-IP sockets, so skip. if (remote_address->ip() != nullptr) { diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 60fc901a1331..26d04eae9348 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -46,7 +46,8 @@ class ConnectionImplUtility { class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallbacks { public: ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket, - TransportSocketPtr&& transport_socket, bool connected); + TransportSocketPtr&& transport_socket, StreamInfo::StreamInfo& stream_info, + bool connected); ~ConnectionImpl() override; @@ -131,7 +132,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback TransportSocketPtr transport_socket_; ConnectionSocketPtr socket_; - StreamInfo::StreamInfoImpl stream_info_; + StreamInfo::StreamInfo& stream_info_; FilterManagerImpl filter_manager_; Buffer::OwnedImpl read_buffer_; @@ -197,6 +198,9 @@ class ClientConnectionImpl : public ConnectionImpl, virtual public ClientConnect // Network::ClientConnection void connect() override; + +private: + StreamInfo::StreamInfoImpl stream_info_; }; } // namespace Network diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index c198642148da..17d608ce2ceb 100644 --- a/source/common/network/dns_impl.cc 
+++ b/source/common/network/dns_impl.cc @@ -180,7 +180,7 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i if (!completed_ && fallback_if_failed_) { fallback_if_failed_ = false; getAddrInfo(AF_INET); - // Note: Nothing can follow this call to getHostByName due to deletion of this + // Note: Nothing can follow this call to getAddrInfo due to deletion of this // object upon synchronous resolution. return; } @@ -231,7 +231,7 @@ void DnsResolverImpl::onAresSocketStateChange(os_fd_t fd, int read, int write) { ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family, ResolveCb callback) { // TODO(hennna): Add DNS caching which will allow testing the edge case of a - // failed initial call to getHostByName followed by a synchronous IPv4 + // failed initial call to getAddrInfo followed by a synchronous IPv4 // resolution. // @see DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback for why this is done. diff --git a/source/common/network/filter_manager_impl.cc b/source/common/network/filter_manager_impl.cc index 9701881a9f2f..c083a56eb4ed 100644 --- a/source/common/network/filter_manager_impl.cc +++ b/source/common/network/filter_manager_impl.cc @@ -46,6 +46,7 @@ void FilterManagerImpl::onContinueReading(ActiveReadFilter* filter, std::list::iterator entry; if (!filter) { + connection_.streamInfo().addBytesReceived(buffer_source.getReadBuffer().buffer.length()); entry = upstream_filters_.begin(); } else { entry = std::next(filter->entry()); @@ -100,6 +101,8 @@ FilterStatus FilterManagerImpl::onWrite(ActiveWriteFilter* filter, } } + // Report the final bytes written to the wire + connection_.streamInfo().addBytesSent(buffer_source.getWriteBuffer().buffer.length()); return FilterStatus::Continue; } diff --git a/source/common/network/filter_matcher.cc b/source/common/network/filter_matcher.cc new file mode 100644 index 000000000000..6668850db44e --- /dev/null +++ 
b/source/common/network/filter_matcher.cc @@ -0,0 +1,53 @@ +#include "common/network/filter_matcher.h" + +#include "envoy/network/filter.h" + +#include "absl/strings/str_format.h" + +namespace Envoy { +namespace Network { + +ListenerFilterMatcherPtr ListenerFilterMatcherBuilder::buildListenerFilterMatcher( + const envoy::config::listener::v3::ListenerFilterChainMatchPredicate& match_config) { + switch (match_config.rule_case()) { + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAnyMatch: + return std::make_unique(); + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kNotMatch: { + return std::make_unique(match_config.not_match()); + } + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kAndMatch: { + return std::make_unique(match_config.and_match().rules()); + } + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase::kOrMatch: { + return std::make_unique(match_config.or_match().rules()); + } + case envoy::config::listener::v3::ListenerFilterChainMatchPredicate::RuleCase:: + kDestinationPortRange: { + return std::make_unique(match_config.destination_port_range()); + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +ListenerFilterSetLogicMatcher::ListenerFilterSetLogicMatcher( + absl::Span + predicates) + : sub_matchers_(predicates.length()) { + std::transform(predicates.begin(), predicates.end(), sub_matchers_.begin(), [](const auto* pred) { + return ListenerFilterMatcherBuilder::buildListenerFilterMatcher(*pred); + }); +} + +bool ListenerFilterOrMatcher::matches(ListenerFilterCallbacks& cb) const { + return std::any_of(sub_matchers_.begin(), sub_matchers_.end(), + [&cb](const auto& matcher) { return matcher->matches(cb); }); +} + +bool ListenerFilterAndMatcher::matches(ListenerFilterCallbacks& cb) const { + return std::all_of(sub_matchers_.begin(), sub_matchers_.end(), + [&cb](const auto& matcher) { return matcher->matches(cb); }); +} + +} // 
namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/filter_matcher.h b/source/common/network/filter_matcher.h new file mode 100644 index 000000000000..ab2dd5f8068a --- /dev/null +++ b/source/common/network/filter_matcher.h @@ -0,0 +1,95 @@ +#pragma once + +#include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/network/filter.h" +#include "envoy/network/listen_socket.h" + +#include "absl/container/fixed_array.h" + +namespace Envoy { +namespace Network { + +/** + * The helper to transform ListenerFilterChainMatchPredicate message to single matcher. + */ +class ListenerFilterMatcherBuilder { +public: + static ListenerFilterMatcherPtr buildListenerFilterMatcher( + const envoy::config::listener::v3::ListenerFilterChainMatchPredicate& match_config); +}; + +/** + * Any matcher (always matches). + */ +class ListenerFilterAnyMatcher final : public ListenerFilterMatcher { +public: + bool matches(ListenerFilterCallbacks&) const override { return true; } +}; + +class ListenerFilterNotMatcher final : public ListenerFilterMatcher { +public: + ListenerFilterNotMatcher( + const envoy::config::listener::v3::ListenerFilterChainMatchPredicate& match_config) + : sub_matcher_(ListenerFilterMatcherBuilder::buildListenerFilterMatcher(match_config)) {} + bool matches(ListenerFilterCallbacks& cb) const override { return !sub_matcher_->matches(cb); } + +private: + const ListenerFilterMatcherPtr sub_matcher_; +}; + +/** + * Destination port matcher. + */ +class ListenerFilterDstPortMatcher final : public ListenerFilterMatcher { +public: + explicit ListenerFilterDstPortMatcher(const ::envoy::type::v3::Int32Range& range) + : start_(range.start()), end_(range.end()) {} + bool matches(ListenerFilterCallbacks& cb) const override { + const auto& address = cb.socket().localAddress(); + // Match on destination port (only for IP addresses). 
+ if (address->type() == Address::Type::Ip) { + const auto port = address->ip()->port(); + return start_ <= port && port < end_; + } else { + return true; + } + } + +private: + const uint32_t start_; + const uint32_t end_; +}; + +/** + * Matcher for implementing set logic. + */ +class ListenerFilterSetLogicMatcher : public ListenerFilterMatcher { +public: + explicit ListenerFilterSetLogicMatcher( + absl::Span + predicates); + +protected: + absl::FixedArray sub_matchers_; +}; + +class ListenerFilterAndMatcher final : public ListenerFilterSetLogicMatcher { +public: + ListenerFilterAndMatcher( + absl::Span + predicates) + : ListenerFilterSetLogicMatcher(predicates) {} + bool matches(ListenerFilterCallbacks& cb) const override; +}; + +class ListenerFilterOrMatcher final : public ListenerFilterSetLogicMatcher { +public: + ListenerFilterOrMatcher( + absl::Span + predicates) + : ListenerFilterSetLogicMatcher(predicates) {} + bool matches(ListenerFilterCallbacks& cb) const override; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 6e745ded373b..306c0c425f64 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -1,13 +1,9 @@ #include "common/network/io_socket_handle_impl.h" -#include -#include - #include "envoy/buffer/buffer.h" #include "common/api/os_sys_calls_impl.h" #include "common/network/address_impl.h" -#include "common/network/io_socket_error_impl.h" #include "absl/container/fixed_array.h" #include "absl/types/optional.h" @@ -144,25 +140,25 @@ Api::IoCallUint64Result IoSocketHandleImpl::sendmsg(const Buffer::RawSlice* slic } } -Api::IoCallUint64Result -IoSocketHandleImpl::sysCallResultToIoCallResult(const Api::SysCallSizeResult& result) { - if (result.rc_ >= 0) { - // Return nullptr as IoError upon success. 
- return Api::IoCallUint64Result(result.rc_, - Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); +Address::InstanceConstSharedPtr getAddressFromSockAddrOrDie(const sockaddr_storage& ss, + socklen_t ss_len, os_fd_t fd) { + try { + // Set v6only to false so that mapped-v6 address can be normalize to v4 + // address. Though dual stack may be disabled, it's still okay to assume the + // address is from a dual stack socket. This is because mapped-v6 address + // must come from a dual stack socket. An actual v6 address can come from + // both dual stack socket and v6 only socket. If |peer_addr| is an actual v6 + // address and the socket is actually v6 only, the returned address will be + // regarded as a v6 address from dual stack socket. However, this address is not going to be + // used to create socket. Wrong knowledge of dual stack support won't hurt. + return Address::addressFromSockAddr(ss, ss_len, /*v6only=*/false); + } catch (const EnvoyException& e) { + PANIC(fmt::format("Invalid address for fd: {}, error: {}", fd, e.what())); } - RELEASE_ASSERT(result.errno_ != EINVAL, "Invalid argument passed in."); - return Api::IoCallUint64Result( - /*rc=*/0, - (result.errno_ == EAGAIN - // EAGAIN is frequent enough that its memory allocation should be avoided. - ? 
Api::IoErrorPtr(IoSocketError::getIoSocketEagainInstance(), - IoSocketError::deleteIoError) - : Api::IoErrorPtr(new IoSocketError(result.errno_), IoSocketError::deleteIoError))); } Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg, - uint32_t self_port) { + uint32_t self_port, os_fd_t fd) { if (cmsg.cmsg_type == IPV6_PKTINFO) { auto info = reinterpret_cast(CMSG_DATA(&cmsg)); sockaddr_storage ss; @@ -171,7 +167,7 @@ Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg ipv6_addr->sin6_family = AF_INET6; ipv6_addr->sin6_addr = info->ipi6_addr; ipv6_addr->sin6_port = htons(self_port); - return Address::addressFromSockAddr(ss, sizeof(sockaddr_in6), /*v6only=*/false); + return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in6), fd); } #ifndef IP_RECVDSTADDR if (cmsg.cmsg_type == IP_PKTINFO) { @@ -191,7 +187,7 @@ Address::InstanceConstSharedPtr maybeGetDstAddressFromHeader(const cmsghdr& cmsg *addr; #endif ipv4_addr->sin_port = htons(self_port); - return Address::addressFromSockAddr(ss, sizeof(sockaddr_in), /*v6only=*/false); + return getAddressFromSockAddrOrDie(ss, sizeof(sockaddr_in), fd); } return nullptr; } @@ -211,14 +207,10 @@ absl::optional maybeGetPacketsDroppedFromHeader( Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice, uint32_t self_port, RecvMsgOutput& output) { + ASSERT(!output.msg_.empty()); - // The minimum cmsg buffer size to filled in destination address and packets dropped when - // receiving a packet. It is possible for a received packet to contain both IPv4 and IPv6 - // addresses. 
- const size_t cmsg_space = CMSG_SPACE(sizeof(int)) + CMSG_SPACE(sizeof(struct in_pktinfo)) + - CMSG_SPACE(sizeof(struct in6_pktinfo)); - absl::FixedArray cbuf(cmsg_space); - memset(cbuf.begin(), 0, cmsg_space); + absl::FixedArray cbuf(cmsg_space_); + memset(cbuf.begin(), 0, cmsg_space_); absl::FixedArray iov(num_slice); uint64_t num_slices_for_read = 0; @@ -240,10 +232,8 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, hdr.msg_iov = iov.begin(); hdr.msg_iovlen = num_slices_for_read; hdr.msg_flags = 0; - auto cmsg = reinterpret_cast(cbuf.begin()); - cmsg->cmsg_len = cmsg_space; - hdr.msg_control = cmsg; - hdr.msg_controllen = cmsg_space; + hdr.msg_control = cbuf.begin(); + hdr.msg_controllen = cmsg_space_; const Api::SysCallSizeResult result = Api::OsSysCallsSingleton::get().recvmsg(fd_, &hdr, 0); if (result.rc_ < 0) { return sysCallResultToIoCallResult(result); @@ -253,44 +243,127 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, fmt::format("Incorrectly set control message length: {}", hdr.msg_controllen)); RELEASE_ASSERT(hdr.msg_namelen > 0, fmt::format("Unable to get remote address from recvmsg() for fd: {}", fd_)); - try { - // Set v6only to false so that mapped-v6 address can be normalize to v4 - // address. Though dual stack may be disabled, it's still okay to assume the - // address is from a dual stack socket. This is because mapped-v6 address - // must come from a dual stack socket. An actual v6 address can come from - // both dual stack socket and v6 only socket. If |peer_addr| is an actual v6 - // address and the socket is actually v6 only, the returned address will be - // regarded as a v6 address from dual stack socket. However, this address is not going to be - // used to create socket. Wrong knowledge of dual stack support won't hurt. 
- output.peer_address_ = - Address::addressFromSockAddr(peer_addr, hdr.msg_namelen, /*v6only=*/false); - } catch (const EnvoyException& e) { - PANIC(fmt::format("Invalid remote address for fd: {}, error: {}", fd_, e.what())); - } + output.msg_[0].peer_address_ = getAddressFromSockAddrOrDie(peer_addr, hdr.msg_namelen, fd_); - // Get overflow, local and peer addresses from control message. - for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) { - if (output.local_address_ == nullptr) { - try { - Address::InstanceConstSharedPtr addr = maybeGetDstAddressFromHeader(*cmsg, self_port); + if (hdr.msg_controllen > 0) { + // Get overflow, local address from control message. + for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; + cmsg = CMSG_NXTHDR(&hdr, cmsg)) { + if (output.msg_[0].local_address_ == nullptr) { + Address::InstanceConstSharedPtr addr = maybeGetDstAddressFromHeader(*cmsg, self_port, fd_); if (addr != nullptr) { // This is a IP packet info message. 
- output.local_address_ = std::move(addr); + output.msg_[0].local_address_ = std::move(addr); continue; } - } catch (const EnvoyException& e) { - PANIC(fmt::format("Invalid destination address for fd: {}, error: {}", fd_, e.what())); + } + if (output.dropped_packets_ != nullptr) { + absl::optional maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg); + if (maybe_dropped) { + *output.dropped_packets_ = *maybe_dropped; + } } } - if (output.dropped_packets_ != nullptr) { - absl::optional maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg); - if (maybe_dropped) { - *output.dropped_packets_ = *maybe_dropped; + } + return sysCallResultToIoCallResult(result); +} + +Api::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uint32_t self_port, + RecvMsgOutput& output) { + ASSERT(output.msg_.size() == slices.size()); + if (slices.empty()) { + return sysCallResultToIoCallResult(Api::SysCallIntResult{0, EAGAIN}); + } + const uint32_t num_packets_per_mmsg_call = slices.size(); + absl::FixedArray mmsg_hdr(num_packets_per_mmsg_call); + absl::FixedArray> iovs( + num_packets_per_mmsg_call, absl::FixedArray(slices[0].size())); + absl::FixedArray raw_addresses(num_packets_per_mmsg_call); + absl::FixedArray> cbufs(num_packets_per_mmsg_call, + absl::FixedArray(cmsg_space_)); + + for (uint32_t i = 0; i < num_packets_per_mmsg_call; ++i) { + memset(&raw_addresses[i], 0, sizeof(sockaddr_storage)); + memset(cbufs[i].data(), 0, cbufs[i].size()); + + mmsg_hdr[i].msg_len = 0; + + msghdr* hdr = &mmsg_hdr[i].msg_hdr; + hdr->msg_name = &raw_addresses[i]; + hdr->msg_namelen = sizeof(sockaddr_storage); + ASSERT(!slices[i].empty()); + + for (size_t j = 0; j < slices[i].size(); ++j) { + iovs[i][j].iov_base = slices[i][j].mem_; + iovs[i][j].iov_len = slices[i][j].len_; + } + hdr->msg_iov = iovs[i].data(); + hdr->msg_iovlen = slices[i].size(); + hdr->msg_control = cbufs[i].data(); + hdr->msg_controllen = cbufs[i].size(); + } + + // Set MSG_WAITFORONE so that recvmmsg will not 
waiting for + // |num_packets_per_mmsg_call| packets to arrive before returning when the + // socket is a blocking socket. + const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().recvmmsg( + fd_, mmsg_hdr.data(), num_packets_per_mmsg_call, MSG_TRUNC | MSG_WAITFORONE, nullptr); + + if (result.rc_ <= 0) { + return sysCallResultToIoCallResult(result); + } + + int num_packets_read = result.rc_; + + for (int i = 0; i < num_packets_read; ++i) { + if (mmsg_hdr[i].msg_len == 0) { + continue; + } + msghdr& hdr = mmsg_hdr[i].msg_hdr; + RELEASE_ASSERT((hdr.msg_flags & MSG_CTRUNC) == 0, + fmt::format("Incorrectly set control message length: {}", hdr.msg_controllen)); + RELEASE_ASSERT(hdr.msg_namelen > 0, + fmt::format("Unable to get remote address from recvmmsg() for fd: {}", fd_)); + if ((hdr.msg_flags & MSG_TRUNC) != 0) { + ENVOY_LOG_MISC(warn, "Dropping truncated UDP packet with size: {}.", mmsg_hdr[i].msg_len); + continue; + } + + output.msg_[i].msg_len_ = mmsg_hdr[i].msg_len; + // Get local and peer addresses for each packet. + output.msg_[i].peer_address_ = + getAddressFromSockAddrOrDie(raw_addresses[i], hdr.msg_namelen, fd_); + if (hdr.msg_controllen > 0) { + struct cmsghdr* cmsg; + for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) { + Address::InstanceConstSharedPtr addr = maybeGetDstAddressFromHeader(*cmsg, self_port, fd_); + if (addr != nullptr) { + // This is a IP packet info message. + output.msg_[i].local_address_ = std::move(addr); + break; + } + } + } + } + // Get overflow from first packet header. 
+ if (output.dropped_packets_ != nullptr) { + msghdr& hdr = mmsg_hdr[0].msg_hdr; + if (hdr.msg_controllen > 0) { + struct cmsghdr* cmsg; + for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) { + absl::optional maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg); + if (maybe_dropped) { + *output.dropped_packets_ = *maybe_dropped; + } } } } return sysCallResultToIoCallResult(result); } +bool IoSocketHandleImpl::supportsMmsg() const { + return Api::OsSysCallsSingleton::get().supportsMmsg(); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 4ad18a56d0ae..cd1a97ea3ac1 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -6,6 +6,7 @@ #include "envoy/network/io_handle.h" #include "common/common/logger.h" +#include "common/network/io_socket_error_impl.h" namespace Envoy { namespace Network { @@ -39,11 +40,37 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable + Api::IoCallUint64Result sysCallResultToIoCallResult(const Api::SysCallResult& result) { + if (result.rc_ >= 0) { + // Return nullptr as IoError upon success. + return Api::IoCallUint64Result(result.rc_, + Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); + } + RELEASE_ASSERT(result.errno_ != EINVAL, "Invalid argument passed in."); + return Api::IoCallUint64Result( + /*rc=*/0, + (result.errno_ == EAGAIN + // EAGAIN is frequent enough that its memory allocation should be avoided. + ? Api::IoErrorPtr(IoSocketError::getIoSocketEagainInstance(), + IoSocketError::deleteIoError) + : Api::IoErrorPtr(new IoSocketError(result.errno_), IoSocketError::deleteIoError))); + } os_fd_t fd_; + + // The minimum cmsg buffer size to filled in destination address and packets dropped when + // receiving a packet. It is possible for a received packet to contain both IPv4 and IPv6 + // addresses. 
+ const size_t cmsg_space_{CMSG_SPACE(sizeof(int)) + CMSG_SPACE(sizeof(struct in_pktinfo)) + + CMSG_SPACE(sizeof(struct in6_pktinfo))}; }; } // namespace Network diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index ef417c77b1df..0dba0680b1c8 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -15,8 +15,6 @@ namespace Network { class SocketImpl : public virtual Socket { public: - ~SocketImpl() override { close(); } - // Network::Socket const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index d89e29db289a..891e6f995ef7 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -28,9 +29,10 @@ namespace Envoy { namespace Network { -const std::string Utility::TCP_SCHEME = "tcp://"; -const std::string Utility::UDP_SCHEME = "udp://"; -const std::string Utility::UNIX_SCHEME = "unix://"; +// TODO(lambdai): Remove below re-declare in C++17. +constexpr absl::string_view Utility::TCP_SCHEME; +constexpr absl::string_view Utility::UDP_SCHEME; +constexpr absl::string_view Utility::UNIX_SCHEME; Address::InstanceConstSharedPtr Utility::resolveUrl(const std::string& url) { if (urlIsTcpScheme(url)) { @@ -227,9 +229,9 @@ Address::InstanceConstSharedPtr Utility::getLocalAddress(const Address::IpVersio // If the local address is not found above, then return the loopback address by default. 
if (ret == nullptr) { if (version == Address::IpVersion::v4) { - ret.reset(new Address::Ipv4Instance("127.0.0.1")); + ret = std::make_shared("127.0.0.1"); } else if (version == Address::IpVersion::v6) { - ret.reset(new Address::Ipv6Instance("::1")); + ret = std::make_shared("::1"); } } return ret; @@ -503,10 +505,9 @@ Utility::protobufAddressSocketType(const envoy::config::core::v3::Address& proto Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, const Buffer::Instance& buffer, const Address::Ip* local_ip, const Address::Instance& peer_address) { - const uint64_t num_slices = buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer.getRawSlices(slices.begin(), num_slices); - return writeToSocket(handle, slices.begin(), num_slices, local_ip, peer_address); + Buffer::RawSliceVector slices = buffer.getRawSlices(); + return writeToSocket(handle, !slices.empty() ? &slices[0] : nullptr, slices.size(), local_ip, + peer_address); } Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlice* slices, @@ -530,17 +531,72 @@ Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlic return send_result; } +void passPayloadToProcessor(uint64_t bytes_read, Buffer::RawSlice& slice, + Buffer::InstancePtr buffer, Address::InstanceConstSharedPtr peer_addess, + Address::InstanceConstSharedPtr local_address, + UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time) { + // Adjust used memory length. 
+ slice.len_ = std::min(slice.len_, static_cast(bytes_read)); + buffer->commit(&slice, 1); + + RELEASE_ASSERT( + peer_addess != nullptr, + fmt::format("Unable to get remote address on the socket bount to local address: {} ", + local_address->asString())); + + // Unix domain sockets are not supported + RELEASE_ASSERT(peer_addess->type() == Address::Type::Ip, + fmt::format("Unsupported remote address: {} local address: {}, receive size: " + "{}", + peer_addess->asString(), local_address->asString(), bytes_read)); + udp_packet_processor.processPacket(std::move(local_address), std::move(peer_addess), + std::move(buffer), receive_time); +} + Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, const Address::Instance& local_address, UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time, uint32_t* packets_dropped) { + if (handle.supportsMmsg()) { + const uint32_t num_packets_per_mmsg_call = 16u; + const uint32_t num_slices_per_packet = 1u; + absl::FixedArray buffers(num_packets_per_mmsg_call); + RawSliceArrays slices(num_packets_per_mmsg_call, + absl::FixedArray(num_slices_per_packet)); + for (uint32_t i = 0; i < num_packets_per_mmsg_call; ++i) { + buffers[i] = std::make_unique(); + const uint64_t num_slices = buffers[i]->reserve(udp_packet_processor.maxPacketSize(), + slices[i].data(), num_slices_per_packet); + ASSERT(num_slices == num_slices_per_packet); + } + + IoHandle::RecvMsgOutput output(num_packets_per_mmsg_call, packets_dropped); + Api::IoCallUint64Result result = handle.recvmmsg(slices, local_address.ip()->port(), output); + if (!result.ok()) { + return result; + } + + uint64_t packets_read = result.rc_; + ENVOY_LOG_MISC(trace, "recvmmsg read {} packets", packets_read); + for (uint64_t i = 0; i < packets_read; ++i) { + Buffer::RawSlice* slice = slices[i].data(); + const uint64_t msg_len = output.msg_[i].msg_len_; + ASSERT(msg_len <= slice->len_); + ENVOY_LOG_MISC(debug, "Receive a packet with {} bytes from {}", msg_len, + 
output.msg_[i].peer_address_->asString()); + passPayloadToProcessor(msg_len, *slice, std::move(buffers[i]), output.msg_[i].peer_address_, + output.msg_[i].local_address_, udp_packet_processor, receive_time); + } + return result; + } + Buffer::InstancePtr buffer = std::make_unique(); Buffer::RawSlice slice; const uint64_t num_slices = buffer->reserve(udp_packet_processor.maxPacketSize(), &slice, 1); - ASSERT(num_slices == 1); + ASSERT(num_slices == 1u); - IoHandle::RecvMsgOutput output(packets_dropped); + IoHandle::RecvMsgOutput output(1, packets_dropped); Api::IoCallUint64Result result = handle.recvmsg(&slice, num_slices, local_address.ip()->port(), output); @@ -548,25 +604,11 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, return result; } - // Adjust used memory length. - slice.len_ = std::min(slice.len_, static_cast(result.rc_)); - buffer->commit(&slice, 1); - ENVOY_LOG_MISC(trace, "recvmsg bytes {}", result.rc_); - RELEASE_ASSERT(output.peer_address_ != nullptr, - fmt::format("Unable to get remote address for fd: {}, local address: {} ", - handle.fd(), local_address.asString())); - - // Unix domain sockets are not supported - RELEASE_ASSERT(output.peer_address_->type() == Address::Type::Ip, - fmt::format("Unsupported remote address: {} local address: {}, receive size: " - "{}", - output.peer_address_->asString(), local_address.asString(), - result.rc_)); - udp_packet_processor.processPacket(std::move(output.local_address_), - std::move(output.peer_address_), std::move(buffer), - receive_time); + passPayloadToProcessor( + result.rc_, slice, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), udp_packet_processor, receive_time); return result; } diff --git a/source/common/network/utility.h b/source/common/network/utility.h index 4e79459538db..152b2ccc471d 100644 --- a/source/common/network/utility.h +++ b/source/common/network/utility.h @@ -64,9 +64,9 @@ static const uint64_t 
MAX_UDP_PACKET_SIZE = 1500; */ class Utility { public: - static const std::string TCP_SCHEME; - static const std::string UDP_SCHEME; - static const std::string UNIX_SCHEME; + static constexpr absl::string_view TCP_SCHEME{"tcp://"}; + static constexpr absl::string_view UDP_SCHEME{"udp://"}; + static constexpr absl::string_view UNIX_SCHEME{"unix://"}; /** * Resolve a URL. diff --git a/source/common/router/BUILD b/source/common/router/BUILD index f237a458d157..a3203e5651f4 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -123,7 +123,6 @@ envoy_cc_library( hdrs = ["vhds.h"], deps = [ ":config_lib", - "//include/envoy/config:discovery_service_base_interface", "//include/envoy/config:subscription_interface", "//include/envoy/http:codes_interface", "//include/envoy/local_info:local_info_interface", @@ -135,6 +134,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:utility_lib", "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", @@ -152,7 +152,6 @@ envoy_cc_library( hdrs = ["rds_impl.h"], deps = [ ":config_lib", - "//include/envoy/config:discovery_service_base_interface", "//include/envoy/config:subscription_interface", "//include/envoy/http:codes_interface", "//include/envoy/local_info:local_info_interface", @@ -167,6 +166,7 @@ envoy_cc_library( "//source/common/common:cleanup_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", "//source/common/config:version_converter_lib", @@ -210,7 +210,6 @@ envoy_cc_library( ":rds_lib", ":scoped_config_lib", "//include/envoy/config:config_provider_interface", - "//include/envoy/config:discovery_service_base_interface", 
"//include/envoy/config:subscription_interface", "//include/envoy/router:route_config_provider_manager_interface", "//include/envoy/stats:stats_interface", @@ -219,6 +218,7 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", "//source/common/config:config_provider_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:version_converter_lib", "//source/common/init:manager_lib", "//source/common/init:watcher_lib", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 64e0e33f91f9..801cc0e00ad4 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -926,10 +926,11 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& h VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& virtual_host, const ConfigImpl& global_route_config, Server::Configuration::ServerFactoryContext& factory_context, - ProtobufMessage::ValidationVisitor& validator, + Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validator, bool validate_clusters) : stat_name_pool_(factory_context.scope().symbolTable()), stat_name_(stat_name_pool_.add(virtual_host.name())), + vcluster_scope_(scope.createScope(virtual_host.name() + ".vcluster")), rate_limit_policy_(virtual_host.rate_limits()), global_route_config_(global_route_config), request_headers_parser_(HeaderParser::configure(virtual_host.request_headers_to_add(), virtual_host.request_headers_to_remove())), @@ -942,7 +943,7 @@ VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& vi virtual_host, per_request_buffer_limit_bytes, std::numeric_limits::max())), include_attempt_count_in_request_(virtual_host.include_request_attempt_count()), include_attempt_count_in_response_(virtual_host.include_attempt_count_in_response()), - virtual_cluster_catch_all_(stat_name_pool_) { + virtual_cluster_catch_all_(stat_name_pool_, 
*vcluster_scope_) { switch (virtual_host.require_tls()) { case envoy::config::route::v3::VirtualHost::NONE: @@ -998,7 +999,8 @@ VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& vi } for (const auto& virtual_cluster : virtual_host.virtual_clusters()) { - virtual_clusters_.push_back(VirtualClusterEntry(virtual_cluster, stat_name_pool_)); + virtual_clusters_.push_back( + VirtualClusterEntry(virtual_cluster, stat_name_pool_, *vcluster_scope_)); } if (virtual_host.has_cors()) { @@ -1007,8 +1009,10 @@ VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& vi } VirtualHostImpl::VirtualClusterEntry::VirtualClusterEntry( - const envoy::config::route::v3::VirtualCluster& virtual_cluster, Stats::StatNamePool& pool) - : stat_name_(pool.add(virtual_cluster.name())) { + const envoy::config::route::v3::VirtualCluster& virtual_cluster, Stats::StatNamePool& pool, + Stats::Scope& scope) + : VirtualClusterBase(pool.add(virtual_cluster.name()), + scope.createScope(virtual_cluster.name())) { if (virtual_cluster.hidden_envoy_deprecated_pattern().empty() == virtual_cluster.headers().empty()) { throw EnvoyException("virtual clusters must define either 'pattern' or 'headers'"); @@ -1066,10 +1070,12 @@ const VirtualHostImpl* RouteMatcher::findWildcardVirtualHost( RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& route_config, const ConfigImpl& global_route_config, Server::Configuration::ServerFactoryContext& factory_context, - ProtobufMessage::ValidationVisitor& validator, bool validate_clusters) { + ProtobufMessage::ValidationVisitor& validator, bool validate_clusters) + : vhost_scope_(factory_context.scope().createScope("vhost")) { for (const auto& virtual_host_config : route_config.virtual_hosts()) { - VirtualHostSharedPtr virtual_host(new VirtualHostImpl( - virtual_host_config, global_route_config, factory_context, validator, validate_clusters)); + VirtualHostSharedPtr virtual_host(new 
VirtualHostImpl(virtual_host_config, global_route_config, + factory_context, *vhost_scope_, validator, + validate_clusters)); for (const std::string& domain_name : virtual_host_config.domains()) { const std::string domain = Http::LowerCaseString(domain_name).get(); bool duplicate_found = false; diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index c2e654b00e84..89add8a903e5 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -157,7 +157,7 @@ class VirtualHostImpl : public VirtualHost { public: VirtualHostImpl(const envoy::config::route::v3::VirtualHost& virtual_host, const ConfigImpl& global_route_config, - Server::Configuration::ServerFactoryContext& factory_context, + Server::Configuration::ServerFactoryContext& factory_context, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validator, bool validate_clusters); RouteConstSharedPtr getRouteFromEntries(const Http::RequestHeaderMap& headers, @@ -187,32 +187,38 @@ class VirtualHostImpl : public VirtualHost { private: enum class SslRequirements { None, ExternalOnly, All }; - struct VirtualClusterEntry : public VirtualCluster { - VirtualClusterEntry(const envoy::config::route::v3::VirtualCluster& virtual_cluster, - Stats::StatNamePool& pool); + struct VirtualClusterBase : public VirtualCluster { + public: + VirtualClusterBase(Stats::StatName stat_name, Stats::ScopePtr&& scope) + : stat_name_(stat_name), scope_(std::move(scope)), stats_(generateStats(*scope_)) {} // Router::VirtualCluster Stats::StatName statName() const override { return stat_name_; } + VirtualClusterStats& stats() const override { return stats_; } + private: const Stats::StatName stat_name_; - std::vector headers_; + Stats::ScopePtr scope_; + mutable VirtualClusterStats stats_; }; - class CatchAllVirtualCluster : public VirtualCluster { - public: - explicit CatchAllVirtualCluster(Stats::StatNamePool& pool) : stat_name_(pool.add("other")) {} + struct 
VirtualClusterEntry : public VirtualClusterBase { + VirtualClusterEntry(const envoy::config::route::v3::VirtualCluster& virtual_cluster, + Stats::StatNamePool& pool, Stats::Scope& scope); - // Router::VirtualCluster - Stats::StatName statName() const override { return stat_name_; } + std::vector headers_; + }; - private: - const Stats::StatName stat_name_; + struct CatchAllVirtualCluster : public VirtualClusterBase { + explicit CatchAllVirtualCluster(Stats::StatNamePool& pool, Stats::Scope& scope) + : VirtualClusterBase(pool.add("other"), scope.createScope("other")) {} }; static const std::shared_ptr SSL_REDIRECT_ROUTE; Stats::StatNamePool stat_name_pool_; const Stats::StatName stat_name_; + Stats::ScopePtr vcluster_scope_; std::vector routes_; std::vector virtual_clusters_; SslRequirements ssl_requirements_; @@ -842,6 +848,7 @@ class RouteMatcher { const WildcardVirtualHosts& wildcard_virtual_hosts, SubstringFunction substring_function) const; + Stats::ScopePtr vhost_scope_; std::unordered_map virtual_hosts_; // std::greater as a minor optimization to iterate from more to less specific // diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index aacf4d9a3712..88ac5741b648 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -108,7 +108,7 @@ parseUpstreamMetadataField(absl::string_view params_str) { switch (value->kind_case()) { case ProtobufWkt::Value::kNumberValue: - return fmt::format("{}", value->number_value()); + return fmt::format("{:g}", value->number_value()); case ProtobufWkt::Value::kStringValue: return value->string_value(); @@ -242,6 +242,11 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam return StreamInfo::Utility::formatDownstreamAddressNoPort( *stream_info.downstreamLocalAddress()); }; + } else if (field_name == "DOWNSTREAM_LOCAL_PORT") { + field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) { + 
return StreamInfo::Utility::formatDownstreamAddressJustPort( + *stream_info.downstreamLocalAddress()); + }; } else if (field_name == "DOWNSTREAM_PEER_URI_SAN") { field_extractor_ = sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { diff --git a/source/common/router/header_parser.cc b/source/common/router/header_parser.cc index 3eafa1394a90..758f89c86bc6 100644 --- a/source/common/router/header_parser.cc +++ b/source/common/router/header_parser.cc @@ -36,9 +36,9 @@ std::string unescape(absl::string_view sv) { return absl::StrReplaceAll(sv, {{"% // The statement machine does minimal validation of the arguments (if any) and does not know the // names of valid variables. Interpretation of the variable name and arguments is delegated to // StreamInfoHeaderFormatter. -HeaderFormatterPtr -parseInternal(const envoy::config::core::v3::HeaderValueOption& header_value_option) { - const std::string& key = header_value_option.header().key(); +HeaderFormatterPtr parseInternal(const envoy::config::core::v3::HeaderValue& header_value, + bool append) { + const std::string& key = header_value.key(); // PGV constraints provide this guarantee. 
ASSERT(!key.empty()); // We reject :path/:authority rewriting, there is already a well defined mechanism to @@ -50,9 +50,7 @@ parseInternal(const envoy::config::core::v3::HeaderValueOption& header_value_opt throw EnvoyException(":-prefixed headers may not be modified"); } - const bool append = PROTOBUF_GET_WRAPPED_OR_DEFAULT(header_value_option, append, true); - - absl::string_view format(header_value_option.header().value()); + absl::string_view format(header_value.value()); if (format.empty()) { return std::make_unique("", append); } @@ -226,7 +224,8 @@ HeaderParserPtr HeaderParser::configure( HeaderParserPtr header_parser(new HeaderParser()); for (const auto& header_value_option : headers_to_add) { - HeaderFormatterPtr header_formatter = parseInternal(header_value_option); + const bool append = PROTOBUF_GET_WRAPPED_OR_DEFAULT(header_value_option, append, true); + HeaderFormatterPtr header_formatter = parseInternal(header_value_option.header(), append); header_parser->headers_to_add_.emplace_back( Http::LowerCaseString(header_value_option.header().key()), std::move(header_formatter)); @@ -235,6 +234,21 @@ HeaderParserPtr HeaderParser::configure( return header_parser; } +HeaderParserPtr HeaderParser::configure( + const Protobuf::RepeatedPtrField& headers_to_add, + bool append) { + HeaderParserPtr header_parser(new HeaderParser()); + + for (const auto& header_value : headers_to_add) { + HeaderFormatterPtr header_formatter = parseInternal(header_value, append); + + header_parser->headers_to_add_.emplace_back(Http::LowerCaseString(header_value.key()), + std::move(header_formatter)); + } + + return header_parser; +} + HeaderParserPtr HeaderParser::configure( const Protobuf::RepeatedPtrField& headers_to_add, const Protobuf::RepeatedPtrField& headers_to_remove) { diff --git a/source/common/router/header_parser.h b/source/common/router/header_parser.h index 57842d00c0aa..d32832f414b4 100644 --- a/source/common/router/header_parser.h +++ 
b/source/common/router/header_parser.h @@ -30,6 +30,15 @@ class HeaderParser { static HeaderParserPtr configure( const Protobuf::RepeatedPtrField& headers_to_add); + /* + * @param headers_to_add defines headers to add during calls to evaluateHeaders. + * @param append defines whether headers will be appended or replaced. + * @return HeaderParserPtr a configured HeaderParserPtr. + */ + static HeaderParserPtr + configure(const Protobuf::RepeatedPtrField& headers_to_add, + bool append); + /* * @param headers_to_add defines headers to add during calls to evaluateHeaders * @param headers_to_remove defines headers to remove during calls to evaluateHeaders diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index a52fc43a25eb..11f45c683dd3 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -68,7 +68,9 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( const uint64_t manager_identifier, Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix, Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager) - : route_config_name_(rds.route_config_name()), factory_context_(factory_context), + : Envoy::Config::SubscriptionBase( + rds.config_source().resource_api_version()), + route_config_name_(rds.route_config_name()), factory_context_(factory_context), validator_(factory_context.messageValidationContext().dynamicValidationVisitor()), parent_init_target_(fmt::format("RdsRouteConfigSubscription init {}", route_config_name_), [this]() { local_init_manager_.initialize(local_init_watcher_); }), @@ -82,7 +84,7 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( stat_prefix_(stat_prefix), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), route_config_provider_manager_(route_config_provider_manager), manager_identifier_(manager_identifier) { - const auto resource_name = getResourceName(rds.config_source().resource_api_version()); + const auto 
resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( rds.config_source(), Grpc::Common::typeUrl(resource_name), *scope_, *this); diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 09d73ab38048..5481e3398064 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -27,7 +27,6 @@ #include "common/common/callback_impl.h" #include "common/common/cleanup.h" #include "common/common/logger.h" -#include "common/config/resources.h" #include "common/init/manager_impl.h" #include "common/init/target_impl.h" #include "common/init/watcher_impl.h" diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 4e8c7fb3ae8f..2952b7a34157 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -31,18 +31,17 @@ const uint32_t RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED; const uint32_t RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED; const uint32_t RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE; -RetryStatePtr RetryStateImpl::create(const RetryPolicy& route_policy, - Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, - Event::Dispatcher& dispatcher, - Upstream::ResourcePriority priority) { +RetryStatePtr +RetryStateImpl::create(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, + const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) { RetryStatePtr ret; // We short circuit here and do not bother with an allocation if there is no chance we will retry. 
if (request_headers.EnvoyRetryOn() || request_headers.EnvoyRetryGrpcOn() || route_policy.retryOn()) { - ret.reset(new RetryStateImpl(route_policy, request_headers, cluster, runtime, random, + ret.reset(new RetryStateImpl(route_policy, request_headers, cluster, vcluster, runtime, random, dispatcher, priority)); } @@ -54,12 +53,13 @@ RetryStatePtr RetryStateImpl::create(const RetryPolicy& route_policy, RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - Upstream::ResourcePriority priority) - : cluster_(cluster), runtime_(runtime), random_(random), dispatcher_(dispatcher), - retry_on_(route_policy.retryOn()), retries_remaining_(route_policy.numRetries()), - priority_(priority), retry_host_predicates_(route_policy.retryHostPredicates()), + const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) + : cluster_(cluster), vcluster_(vcluster), runtime_(runtime), random_(random), + dispatcher_(dispatcher), retry_on_(route_policy.retryOn()), + retries_remaining_(route_policy.numRetries()), priority_(priority), + retry_host_predicates_(route_policy.retryHostPredicates()), retry_priority_(route_policy.retryPriority()), retriable_status_codes_(route_policy.retriableStatusCodes()), retriable_headers_(route_policy.retriableHeaders()) { @@ -213,6 +213,9 @@ RetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callba // and it was successful. 
if (callback_ && !would_retry) { cluster_.stats().upstream_rq_retry_success_.inc(); + if (vcluster_) { + vcluster_->stats().upstream_rq_retry_success_.inc(); + } } resetRetry(); @@ -221,7 +224,13 @@ RetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callba return RetryStatus::No; } + // The request has exhausted the number of retries allotted to it by the retry policy configured + // (or the x-envoy-max-retries header). if (retries_remaining_ == 0) { + cluster_.stats().upstream_rq_retry_limit_exceeded_.inc(); + if (vcluster_) { + vcluster_->stats().upstream_rq_retry_limit_exceeded_.inc(); + } return RetryStatus::NoRetryLimitExceeded; } @@ -229,6 +238,9 @@ RetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callba if (!cluster_.resourceManager(priority_).retries().canCreate()) { cluster_.stats().upstream_rq_retry_overflow_.inc(); + if (vcluster_) { + vcluster_->stats().upstream_rq_retry_overflow_.inc(); + } return RetryStatus::NoOverflow; } @@ -240,6 +252,9 @@ RetryStatus RetryStateImpl::shouldRetry(bool would_retry, DoRetryCallback callba callback_ = callback; cluster_.resourceManager(priority_).retries().inc(); cluster_.stats().upstream_rq_retry_.inc(); + if (vcluster_) { + vcluster_->stats().upstream_rq_retry_.inc(); + } enableBackoffTimer(); return RetryStatus::Yes; } diff --git a/source/common/router/retry_state_impl.h b/source/common/router/retry_state_impl.h index 3b44d56931fc..79d355cd6499 100644 --- a/source/common/router/retry_state_impl.h +++ b/source/common/router/retry_state_impl.h @@ -26,9 +26,9 @@ class RetryStateImpl : public RetryState { public: static RetryStatePtr create(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - Upstream::ResourcePriority priority); + const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, + Runtime::Loader& 
runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority); ~RetryStateImpl() override; /** @@ -87,9 +87,9 @@ class RetryStateImpl : public RetryState { private: RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - Upstream::ResourcePriority priority); + const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority); void enableBackoffTimer(); void resetRetry(); @@ -97,6 +97,7 @@ class RetryStateImpl : public RetryState { RetryStatus shouldRetry(bool would_retry, DoRetryCallback callback); const Upstream::ClusterInfo& cluster_; + const VirtualCluster* vcluster_; Runtime::Loader& runtime_; Runtime::RandomGenerator& random_; Event::Dispatcher& dispatcher_; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 44d1d58ee3cf..4171f18440be 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -541,23 +541,27 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, } } - auto conn_pool = getConnPool(); + Http::ConnectionPool::Instance* http_pool = getHttpConnPool(); + Upstream::HostDescriptionConstSharedPtr host; - if (!conn_pool) { + if (http_pool) { + host = http_pool->host(); + } else { sendNoHealthyUpstreamResponse(); return Http::FilterHeadersStatus::StopIteration; } + if (debug_config && debug_config->append_upstream_host_) { // The hostname and address will be appended to any local or upstream responses from this point, // possibly in addition to the cluster name. 
- modify_headers = [modify_headers, debug_config, conn_pool](Http::ResponseHeaderMap& headers) { + modify_headers = [modify_headers, debug_config, host](Http::ResponseHeaderMap& headers) { modify_headers(headers); headers.addCopy( debug_config->hostname_header_.value_or(Http::Headers::get().EnvoyUpstreamHostname), - conn_pool->host()->hostname()); + host->hostname()); headers.addCopy(debug_config->host_address_header_.value_or( Http::Headers::get().EnvoyUpstreamHostAddress), - conn_pool->host()->address()->asString()); + host->address()->asString()); }; } @@ -608,15 +612,15 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, route_entry_->finalizeRequestHeaders(headers, callbacks_->streamInfo(), !config_.suppress_envoy_headers_); - FilterUtility::setUpstreamScheme( - headers, conn_pool->host()->transportSocketFactory().implementsSecureTransport()); + FilterUtility::setUpstreamScheme(headers, + host->transportSocketFactory().implementsSecureTransport()); // Ensure an http transport scheme is selected before continuing with decoding. ASSERT(headers.Scheme()); - retry_state_ = - createRetryState(route_entry_->retryPolicy(), headers, *cluster_, config_.runtime_, - config_.random_, callbacks_->dispatcher(), route_entry_->priority()); + retry_state_ = createRetryState(route_entry_->retryPolicy(), headers, *cluster_, + request_vcluster_, config_.runtime_, config_.random_, + callbacks_->dispatcher(), route_entry_->priority()); // Determine which shadow policies to use. It's possible that we don't do any shadowing due to // runtime keys. @@ -632,7 +636,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Hang onto the modify_headers function for later use in handling upstream responses. 
modify_headers_ = modify_headers; - UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + UpstreamRequestPtr upstream_request = + std::make_unique(*this, std::make_unique(*http_pool)); upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { @@ -642,7 +647,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } -Http::ConnectionPool::Instance* Filter::getConnPool() { +Http::ConnectionPool::Instance* Filter::getHttpConnPool() { // Choose protocol based on cluster configuration and downstream connection // Note: Cluster may downgrade HTTP2 to HTTP1 based on runtime configuration. Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); @@ -821,6 +826,9 @@ void Filter::onResponseTimeout() { // Don't do work for upstream requests we've already seen headers for. 
if (upstream_request->awaitingHeaders()) { cluster_->stats().upstream_rq_timeout_.inc(); + if (request_vcluster_) { + request_vcluster_->stats().upstream_rq_timeout_.inc(); + } if (cluster_->timeoutBudgetStats().has_value()) { // Cancel firing per-try timeout information, because the per-try timeout did not come into @@ -1045,6 +1053,12 @@ void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, onUpstreamAbort(Http::Code::ServiceUnavailable, response_flags, body, dropped, details); } +void Filter::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) { + if (retry_state_ && host) { + retry_state_->onHostAttempted(host); + } +} + StreamInfo::ResponseFlag Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { switch (reset_reason) { @@ -1422,8 +1436,15 @@ void Filter::doRetry() { attempt_count_++; ASSERT(pending_retries_ > 0); pending_retries_--; - Http::ConnectionPool::Instance* conn_pool = getConnPool(); - if (!conn_pool) { + UpstreamRequestPtr upstream_request; + + Http::ConnectionPool::Instance* conn_pool = getHttpConnPool(); + if (conn_pool) { + upstream_request = + std::make_unique(*this, std::make_unique(*conn_pool)); + } + + if (!upstream_request) { sendNoHealthyUpstreamResponse(); cleanup(); return; @@ -1434,7 +1455,6 @@ void Filter::doRetry() { } ASSERT(response_timeout_ || timeout_.global_timeout_.count() == 0); - UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); UpstreamRequest* upstream_request_tmp = upstream_request.get(); upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); @@ -1461,11 +1481,11 @@ uint32_t Filter::numRequestsAwaitingHeaders() { RetryStatePtr ProdFilter::createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, 
Event::Dispatcher& dispatcher, - Upstream::ResourcePriority priority) { - return RetryStateImpl::create(policy, request_headers, cluster, runtime, random, dispatcher, - priority); + const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) { + return RetryStateImpl::create(policy, request_headers, cluster, vcluster, runtime, random, + dispatcher, priority); } } // namespace Router diff --git a/source/common/router/router.h b/source/common/router/router.h index 7eaf5daeddd1..fb5e52945469 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -244,13 +244,52 @@ class FilterConfig { using FilterConfigSharedPtr = std::shared_ptr; class UpstreamRequest; +using UpstreamRequestPtr = std::unique_ptr; + +// The interface the UpstreamRequest has to interact with the router filter. +// Split out primarily for unit test mocks. +class RouterFilterInterface { +public: + virtual ~RouterFilterInterface() = default; + + virtual void onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request) PURE; + virtual void onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream) PURE; + virtual void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, + bool end_stream) PURE; + virtual void onUpstreamTrailers(Http::ResponseTrailerMapPtr&& trailers, + UpstreamRequest& upstream_request) PURE; + virtual void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) PURE; + virtual void onUpstreamReset(Http::StreamResetReason reset_reason, + absl::string_view transport_failure, + UpstreamRequest& upstream_request) PURE; + virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE; + virtual void onPerTryTimeout(UpstreamRequest& upstream_request) PURE; + + virtual 
Http::StreamDecoderFilterCallbacks* callbacks() PURE; + virtual Upstream::ClusterInfoConstSharedPtr cluster() PURE; + virtual FilterConfig& config() PURE; + virtual FilterUtility::TimeoutData timeout() PURE; + virtual Http::RequestHeaderMap* downstreamHeaders() PURE; + virtual Http::RequestTrailerMap* downstreamTrailers() PURE; + virtual bool downstreamResponseStarted() const PURE; + virtual bool downstreamEndStream() const PURE; + virtual uint32_t attemptCount() const PURE; + virtual const VirtualCluster* requestVcluster() const PURE; + virtual const RouteEntry* routeEntry() const PURE; + virtual const std::list& upstreamRequests() const PURE; + virtual const UpstreamRequest* finalUpstreamRequest() const PURE; + virtual TimeSource& timeSource() PURE; +}; /** * Service routing filter. */ class Filter : Logger::Loggable, public Http::StreamDecoderFilter, - public Upstream::LoadBalancerContextBase { + public Upstream::LoadBalancerContextBase, + public RouterFilterInterface { public: Filter(FilterConfig& config) : config_(config), final_upstream_request_(nullptr), downstream_response_started_(false), @@ -259,6 +298,9 @@ class Filter : Logger::Loggable, ~Filter() override; + static StreamInfo::ResponseFlag + streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason); + // Http::StreamFilterBase void onDestroy() override; @@ -377,14 +419,42 @@ class Filter : Logger::Loggable, return cookie_value; } + // RouterFilterInterface + void onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request) override; + void onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream) override; + void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, + bool end_stream) override; + void onUpstreamTrailers(Http::ResponseTrailerMapPtr&& trailers, + UpstreamRequest& upstream_request) override; + void onUpstreamMetadata(Http::MetadataMapPtr&& 
metadata_map) override; + void onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure, + UpstreamRequest& upstream_request) override; + void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override; + void onPerTryTimeout(UpstreamRequest& upstream_request) override; + Http::StreamDecoderFilterCallbacks* callbacks() override { return callbacks_; } + Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; } + FilterConfig& config() override { return config_; } + FilterUtility::TimeoutData timeout() override { return timeout_; } + Http::RequestHeaderMap* downstreamHeaders() override { return downstream_headers_; } + Http::RequestTrailerMap* downstreamTrailers() override { return downstream_trailers_; } + bool downstreamResponseStarted() const override { return downstream_response_started_; } + bool downstreamEndStream() const override { return downstream_end_stream_; } + uint32_t attemptCount() const override { return attempt_count_; } + const VirtualCluster* requestVcluster() const override { return request_vcluster_; } + const RouteEntry* routeEntry() const override { return route_entry_; } + const std::list& upstreamRequests() const override { + return upstream_requests_; + } + const UpstreamRequest* finalUpstreamRequest() const override { return final_upstream_request_; } + TimeSource& timeSource() override { return config_.timeSource(); } + private: - using UpstreamRequestPtr = std::unique_ptr; friend class UpstreamRequest; RetryStatePtr retry_state_; - StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason); - Stats::StatName upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host); void chargeUpstreamCode(uint64_t response_status_code, const Http::ResponseHeaderMap& response_headers, @@ -393,22 +463,18 @@ class Filter : Logger::Loggable, bool dropped); void chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest& 
upstream_request); void cleanup(); - virtual RetryStatePtr createRetryState(const RetryPolicy& policy, - Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, - Event::Dispatcher& dispatcher, - Upstream::ResourcePriority priority) PURE; - Http::ConnectionPool::Instance* getConnPool(); + virtual RetryStatePtr + createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, + const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) PURE; + Http::ConnectionPool::Instance* getHttpConnPool(); void maybeDoShadowing(); bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); uint32_t numRequestsAwaitingHeaders(); void onGlobalTimeout(); - void onPerTryTimeout(UpstreamRequest& upstream_request); void onRequestComplete(); void onResponseTimeout(); - void onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers, - UpstreamRequest& upstream_request); // Handle an upstream request aborted due to a local timeout. void onSoftPerTryTimeout(); void onSoftPerTryTimeout(UpstreamRequest& upstream_request); @@ -418,15 +484,7 @@ class Filter : Logger::Loggable, // downstream if appropriate. 
void onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flag, absl::string_view body, bool dropped, absl::string_view details); - void onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, - UpstreamRequest& upstream_request, bool end_stream); - void onUpstreamData(Buffer::Instance& data, UpstreamRequest& upstream_request, bool end_stream); - void onUpstreamTrailers(Http::ResponseTrailerMapPtr&& trailers, - UpstreamRequest& upstream_request); - void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map); void onUpstreamComplete(UpstreamRequest& upstream_request); - void onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure, - UpstreamRequest& upstream_request); // Reset all in-flight upstream requests. void resetAll(); // Reset all in-flight upstream requests that do NOT match the passed argument. This is used @@ -445,7 +503,6 @@ class Filter : Logger::Loggable, void handleNon5xxResponseHeaders(absl::optional grpc_status, UpstreamRequest& upstream_request, bool end_stream, uint64_t grpc_to_http_status); - TimeSource& timeSource() { return config_.timeSource(); } Http::Context& httpContext() { return config_.http_context_; } FilterConfig& config_; @@ -493,7 +550,8 @@ class ProdFilter : public Filter { private: // Filter RetryStatePtr createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, Runtime::Loader& runtime, + const Upstream::ClusterInfo& cluster, + const VirtualCluster* vcluster, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) override; }; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index a75e81e08fd0..89ec116125eb 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -99,13 +99,15 @@ ScopedRdsConfigSubscription::ScopedRdsConfigSubscription( 
ScopedRoutesConfigProviderManager& config_provider_manager) : DeltaConfigSubscriptionInstance("SRDS", manager_identifier, config_provider_manager, factory_context), + Envoy::Config::SubscriptionBase( + rds_config_source.resource_api_version()), factory_context_(factory_context), name_(name), scope_key_builder_(scope_key_builder), scope_(factory_context.scope().createScope(stat_prefix + "scoped_rds." + name + ".")), stats_({ALL_SCOPED_RDS_STATS(POOL_COUNTER(*scope_))}), rds_config_source_(std::move(rds_config_source)), validation_visitor_(factory_context.messageValidationContext().dynamicValidationVisitor()), stat_prefix_(stat_prefix), route_config_provider_manager_(route_config_provider_manager) { - const auto resource_name = getResourceName(rds_config_source_.resource_api_version()); + const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( scoped_rds.scoped_rds_config_source(), Grpc::Common::typeUrl(resource_name), *scope_, diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index 9b57aad1b213..befa51a21dc2 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -4,7 +4,6 @@ #include "envoy/common/callback.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/route/v3/scoped_route.pb.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" @@ -13,6 +12,7 @@ #include "envoy/stats/scope.h" #include "common/config/config_provider_impl.h" +#include "common/config/subscription_base.h" #include "common/init/manager_impl.h" #include "common/router/rds_impl.h" #include "common/router/scoped_config_impl.h" diff --git a/source/common/router/shadow_writer_impl.h b/source/common/router/shadow_writer_impl.h index c59fd00daa58..2224912e8856 100644 --- 
a/source/common/router/shadow_writer_impl.h +++ b/source/common/router/shadow_writer_impl.h @@ -24,8 +24,8 @@ class ShadowWriterImpl : Logger::Loggable, const Http::AsyncClient::RequestOptions& options) override; // Http::AsyncClient::Callbacks - void onSuccess(Http::ResponseMessagePtr&&) override {} - void onFailure(Http::AsyncClient::FailureReason) override {} + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {} + void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {} private: Upstream::ClusterManager& cm_; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 400639a1e88e..ac048161c93c 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -31,7 +31,7 @@ #include "common/network/upstream_subject_alt_names.h" #include "common/router/config_impl.h" #include "common/router/debug_config.h" -#include "common/router/retry_state_impl.h" +#include "common/router/router.h" #include "common/runtime/runtime_impl.h" #include "common/stream_info/uint32_accessor_impl.h" #include "common/tracing/http_tracer_impl.h" @@ -41,26 +41,30 @@ namespace Envoy { namespace Router { -UpstreamRequest::UpstreamRequest(Filter& parent, Http::ConnectionPool::Instance& pool) - : parent_(parent), conn_pool_(pool), grpc_rq_success_deferred_(false), - stream_info_(pool.protocol(), parent_.callbacks_->dispatcher().timeSource()), - start_time_(parent_.callbacks_->dispatcher().timeSource().monotonicTime()), +UpstreamRequest::UpstreamRequest(RouterFilterInterface& parent, + std::unique_ptr&& conn_pool) + : parent_(parent), conn_pool_(std::move(conn_pool)), grpc_rq_success_deferred_(false), + stream_info_(parent_.callbacks()->dispatcher().timeSource()), + start_time_(parent_.callbacks()->dispatcher().timeSource().monotonicTime()), calling_encode_headers_(false), upstream_canary_(false), decode_complete_(false), 
encode_complete_(false), encode_trailers_(false), retried_(false), awaiting_headers_(true), outlier_detection_timeout_recorded_(false), create_per_try_timeout_on_request_complete_(false), - record_timeout_budget_(parent_.cluster_->timeoutBudgetStats().has_value()) { - if (parent_.config_.start_child_span_) { - span_ = parent_.callbacks_->activeSpan().spawnChild( - parent_.callbacks_->tracingConfig(), "router " + parent.cluster_->name() + " egress", + record_timeout_budget_(parent_.cluster()->timeoutBudgetStats().has_value()) { + if (parent_.config().start_child_span_) { + span_ = parent_.callbacks()->activeSpan().spawnChild( + parent_.callbacks()->tracingConfig(), "router " + parent.cluster()->name() + " egress", parent.timeSource().systemTime()); - if (parent.attempt_count_ != 1) { + if (parent.attemptCount() != 1) { // This is a retry request, add this metadata to span. - span_->setTag(Tracing::Tags::get().RetryCount, std::to_string(parent.attempt_count_ - 1)); + span_->setTag(Tracing::Tags::get().RetryCount, std::to_string(parent.attemptCount() - 1)); } } - stream_info_.healthCheck(parent_.callbacks_->streamInfo().healthCheck()); + stream_info_.healthCheck(parent_.callbacks()->streamInfo().healthCheck()); + if (conn_pool_->protocol().has_value()) { + stream_info_.protocol(conn_pool_->protocol().value()); + } } UpstreamRequest::~UpstreamRequest() { @@ -79,40 +83,41 @@ UpstreamRequest::~UpstreamRequest() { // If desired, fire the per-try histogram when the UpstreamRequest // completes. 
if (record_timeout_budget_) { - Event::Dispatcher& dispatcher = parent_.callbacks_->dispatcher(); + Event::Dispatcher& dispatcher = parent_.callbacks()->dispatcher(); const MonotonicTime end_time = dispatcher.timeSource().monotonicTime(); const std::chrono::milliseconds response_time = std::chrono::duration_cast(end_time - start_time_); - parent_.cluster_->timeoutBudgetStats() + parent_.cluster() + ->timeoutBudgetStats() ->upstream_rq_timeout_budget_per_try_percent_used_.recordValue( - FilterUtility::percentageOfTimeout(response_time, parent_.timeout_.per_try_timeout_)); + FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_)); } stream_info_.setUpstreamTiming(upstream_timing_); stream_info_.onRequestComplete(); - for (const auto& upstream_log : parent_.config_.upstream_logs_) { - upstream_log->log(parent_.downstream_headers_, upstream_headers_.get(), + for (const auto& upstream_log : parent_.config().upstream_logs_) { + upstream_log->log(parent_.downstreamHeaders(), upstream_headers_.get(), upstream_trailers_.get(), stream_info_); } } void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) { - ScopeTrackerScopeState scope(&parent_.callbacks_->scope(), parent_.callbacks_->dispatcher()); + ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); ASSERT(100 == Http::Utility::getResponseStatus(*headers)); parent_.onUpstream100ContinueHeaders(std::move(headers), *this); } void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { - ScopeTrackerScopeState scope(&parent_.callbacks_->scope(), parent_.callbacks_->dispatcher()); + ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); // TODO(rodaine): This is actually measuring after the headers are parsed and not the first // byte. 
- upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks_->dispatcher().timeSource()); + upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource()); maybeEndDecode(end_stream); awaiting_headers_ = false; - if (!parent_.config_.upstream_logs_.empty()) { + if (!parent_.config().upstream_logs_.empty()) { upstream_headers_ = Http::createHeaderMap(*headers); } const uint64_t response_code = Http::Utility::getResponseStatus(*headers); @@ -121,7 +126,7 @@ void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool e } void UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { - ScopeTrackerScopeState scope(&parent_.callbacks_->scope(), parent_.callbacks_->dispatcher()); + ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); @@ -129,10 +134,10 @@ void UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { } void UpstreamRequest::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) { - ScopeTrackerScopeState scope(&parent_.callbacks_->scope(), parent_.callbacks_->dispatcher()); + ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); maybeEndDecode(true); - if (!parent_.config_.upstream_logs_.empty()) { + if (!parent_.config().upstream_logs_.empty()) { upstream_trailers_ = Http::createHeaderMap(*trailers); } parent_.onUpstreamTrailers(std::move(trailers), *this); @@ -144,7 +149,7 @@ void UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) { void UpstreamRequest::maybeEndDecode(bool end_stream) { if (end_stream) { - upstream_timing_.onLastUpstreamRxByteReceived(parent_.callbacks_->dispatcher().timeSource()); + upstream_timing_.onLastUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource()); decode_complete_ = true; } } @@ -152,23 +157,15 @@ void UpstreamRequest::maybeEndDecode(bool 
end_stream) { void UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) { stream_info_.onUpstreamHostSelected(host); upstream_host_ = host; - parent_.callbacks_->streamInfo().onUpstreamHostSelected(host); - if (parent_.retry_state_ && host) { - parent_.retry_state_->onHostAttempted(host); - } + parent_.callbacks()->streamInfo().onUpstreamHostSelected(host); + parent_.onUpstreamHostSelected(host); } void UpstreamRequest::encodeHeaders(bool end_stream) { ASSERT(!encode_complete_); encode_complete_ = end_stream; - // It's possible for a reset to happen inline within the newStream() call. In this case, we - // might get deleted inline as well. Only write the returned handle out if it is not nullptr to - // deal with this case. - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(*this, *this); - if (handle) { - conn_pool_stream_handle_ = handle; - } + conn_pool_->newStream(this); } void UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) { @@ -176,23 +173,23 @@ void UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) { encode_complete_ = end_stream; if (!upstream_) { - ENVOY_STREAM_LOG(trace, "buffering {} bytes", *parent_.callbacks_, data.length()); + ENVOY_STREAM_LOG(trace, "buffering {} bytes", *parent_.callbacks(), data.length()); if (!buffered_request_body_) { buffered_request_body_ = std::make_unique( [this]() -> void { this->enableDataFromDownstreamForFlowControl(); }, [this]() -> void { this->disableDataFromDownstreamForFlowControl(); }); - buffered_request_body_->setWatermarks(parent_.callbacks_->decoderBufferLimit()); + buffered_request_body_->setWatermarks(parent_.callbacks()->decoderBufferLimit()); } buffered_request_body_->move(data); } else { ASSERT(downstream_metadata_map_vector_.empty()); - ENVOY_STREAM_LOG(trace, "proxying {} bytes", *parent_.callbacks_, data.length()); + ENVOY_STREAM_LOG(trace, "proxying {} bytes", *parent_.callbacks(), data.length()); 
stream_info_.addBytesSent(data.length()); upstream_->encodeData(data, end_stream); if (end_stream) { - upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); + upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource()); } } } @@ -203,23 +200,23 @@ void UpstreamRequest::encodeTrailers(const Http::RequestTrailerMap& trailers) { encode_trailers_ = true; if (!upstream_) { - ENVOY_STREAM_LOG(trace, "buffering trailers", *parent_.callbacks_); + ENVOY_STREAM_LOG(trace, "buffering trailers", *parent_.callbacks()); } else { ASSERT(downstream_metadata_map_vector_.empty()); - ENVOY_STREAM_LOG(trace, "proxying trailers", *parent_.callbacks_); + ENVOY_STREAM_LOG(trace, "proxying trailers", *parent_.callbacks()); upstream_->encodeTrailers(trailers); - upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); + upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource()); } } void UpstreamRequest::encodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) { if (!upstream_) { ENVOY_STREAM_LOG(trace, "upstream_ not ready. 
Store metadata_map to encode later: {}", - *parent_.callbacks_, *metadata_map_ptr); + *parent_.callbacks(), *metadata_map_ptr); downstream_metadata_map_vector_.emplace_back(std::move(metadata_map_ptr)); } else { - ENVOY_STREAM_LOG(trace, "Encode metadata: {}", *parent_.callbacks_, *metadata_map_ptr); + ENVOY_STREAM_LOG(trace, "Encode metadata: {}", *parent_.callbacks(), *metadata_map_ptr); Http::MetadataMapVector metadata_map_vector; metadata_map_vector.emplace_back(std::move(metadata_map_ptr)); upstream_->encodeMetadata(metadata_map_vector); @@ -228,7 +225,7 @@ void UpstreamRequest::encodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) { void UpstreamRequest::onResetStream(Http::StreamResetReason reason, absl::string_view transport_failure_reason) { - ScopeTrackerScopeState scope(&parent_.callbacks_->scope(), parent_.callbacks_->dispatcher()); + ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); if (span_ != nullptr) { // Add tags about reset. 
@@ -239,7 +236,7 @@ void UpstreamRequest::onResetStream(Http::StreamResetReason reason, clearRequestEncoder(); awaiting_headers_ = false; if (!calling_encode_headers_) { - stream_info_.setResponseFlag(parent_.streamResetReasonToResponseFlag(reason)); + stream_info_.setResponseFlag(Filter::streamResetReasonToResponseFlag(reason)); parent_.onUpstreamReset(reason, transport_failure_reason, *this); } else { deferred_reset_reason_ = reason; @@ -257,15 +254,13 @@ void UpstreamRequest::resetStream() { span_->setTag(Tracing::Tags::get().Canceled, Tracing::Tags::get().True); } - if (conn_pool_stream_handle_) { - ENVOY_STREAM_LOG(debug, "cancelling pool request", *parent_.callbacks_); + if (conn_pool_->cancelAnyPendingRequest()) { + ENVOY_STREAM_LOG(debug, "canceled pool request", *parent_.callbacks()); ASSERT(!upstream_); - conn_pool_stream_handle_->cancel(); - conn_pool_stream_handle_ = nullptr; } if (upstream_) { - ENVOY_STREAM_LOG(debug, "resetting pool request", *parent_.callbacks_); + ENVOY_STREAM_LOG(debug, "resetting pool request", *parent_.callbacks()); upstream_->resetStream(); clearRequestEncoder(); } @@ -273,25 +268,25 @@ void UpstreamRequest::resetStream() { void UpstreamRequest::setupPerTryTimeout() { ASSERT(!per_try_timeout_); - if (parent_.timeout_.per_try_timeout_.count() > 0) { + if (parent_.timeout().per_try_timeout_.count() > 0) { per_try_timeout_ = - parent_.callbacks_->dispatcher().createTimer([this]() -> void { onPerTryTimeout(); }); - per_try_timeout_->enableTimer(parent_.timeout_.per_try_timeout_); + parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryTimeout(); }); + per_try_timeout_->enableTimer(parent_.timeout().per_try_timeout_); } } void UpstreamRequest::onPerTryTimeout() { // If we've sent anything downstream, ignore the per try timeout and let the response continue // up to the global timeout - if (!parent_.downstream_response_started_) { - ENVOY_STREAM_LOG(debug, "upstream per try timeout", *parent_.callbacks_); + if 
(!parent_.downstreamResponseStarted()) { + ENVOY_STREAM_LOG(debug, "upstream per try timeout", *parent_.callbacks()); stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout); parent_.onPerTryTimeout(*this); } else { ENVOY_STREAM_LOG(debug, "ignored upstream per try timeout due to already started downstream response", - *parent_.callbacks_); + *parent_.callbacks()); } } @@ -313,49 +308,59 @@ void UpstreamRequest::onPoolFailure(Http::ConnectionPool::PoolFailureReason reas onResetStream(reset_reason, transport_failure_reason); } -void UpstreamRequest::onPoolReady(Http::RequestEncoder& request_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) { +void UpstreamRequest::onPoolReady( + std::unique_ptr&& upstream, Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info) { // This may be called under an existing ScopeTrackerScopeState but it will unwind correctly. - ScopeTrackerScopeState scope(&parent_.callbacks_->scope(), parent_.callbacks_->dispatcher()); - ENVOY_STREAM_LOG(debug, "pool ready", *parent_.callbacks_); + ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + ENVOY_STREAM_LOG(debug, "pool ready", *parent_.callbacks()); + upstream_ = std::move(upstream); + + if (parent_.requestVcluster()) { + // The cluster increases its upstream_rq_total_ counter right before firing this onPoolReady + // callback. Hence, the upstream request increases the virtual cluster's upstream_rq_total_ stat + // here. 
+ parent_.requestVcluster()->stats().upstream_rq_total_.inc(); + } host->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess); onUpstreamHostSelected(host); - stream_info_.setUpstreamLocalAddress(request_encoder.getStream().connectionLocalAddress()); - parent_.callbacks_->streamInfo().setUpstreamLocalAddress( - request_encoder.getStream().connectionLocalAddress()); + stream_info_.setUpstreamLocalAddress(upstream_local_address); + parent_.callbacks()->streamInfo().setUpstreamLocalAddress(upstream_local_address); stream_info_.setUpstreamSslConnection(info.downstreamSslConnection()); - parent_.callbacks_->streamInfo().setUpstreamSslConnection(info.downstreamSslConnection()); + parent_.callbacks()->streamInfo().setUpstreamSslConnection(info.downstreamSslConnection()); - if (parent_.downstream_end_stream_) { + if (parent_.downstreamEndStream()) { setupPerTryTimeout(); } else { create_per_try_timeout_on_request_complete_ = true; } - conn_pool_stream_handle_ = nullptr; - setRequestEncoder(request_encoder); + // Make sure the connection manager will inform the downstream watermark manager when the + // downstream buffers are overrun. This may result in immediate watermark callbacks referencing + // the encoder. 
+ parent_.callbacks()->addDownstreamWatermarkCallbacks(downstream_watermark_manager_); + calling_encode_headers_ = true; - if (parent_.route_entry_->autoHostRewrite() && !host->hostname().empty()) { - parent_.downstream_headers_->setHost(host->hostname()); + if (parent_.routeEntry()->autoHostRewrite() && !host->hostname().empty()) { + parent_.downstreamHeaders()->setHost(host->hostname()); } if (span_ != nullptr) { - span_->injectContext(*parent_.downstream_headers_); + span_->injectContext(*parent_.downstreamHeaders()); } - upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); + upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource()); const bool end_stream = !buffered_request_body_ && encode_complete_ && !encode_trailers_; // If end_stream is set in headers, and there are metadata to send, delays end_stream. The case // only happens when decoding headers filters return ContinueAndEndStream. const bool delay_headers_end_stream = end_stream && !downstream_metadata_map_vector_.empty(); - request_encoder.encodeHeaders(*parent_.downstream_headers_, - end_stream && !delay_headers_end_stream); + upstream_->encodeHeaders(*parent_.downstreamHeaders(), end_stream && !delay_headers_end_stream); calling_encode_headers_ = false; // It is possible to get reset in the middle of an encodeHeaders() call. This happens for @@ -368,43 +373,35 @@ void UpstreamRequest::onPoolReady(Http::RequestEncoder& request_encoder, } else { // Encode metadata after headers and before any other frame type. if (!downstream_metadata_map_vector_.empty()) { - ENVOY_STREAM_LOG(debug, "Send metadata onPoolReady. {}", *parent_.callbacks_, + ENVOY_STREAM_LOG(debug, "Send metadata onPoolReady. 
{}", *parent_.callbacks(), downstream_metadata_map_vector_); - request_encoder.encodeMetadata(downstream_metadata_map_vector_); + upstream_->encodeMetadata(downstream_metadata_map_vector_); downstream_metadata_map_vector_.clear(); if (delay_headers_end_stream) { Buffer::OwnedImpl empty_data(""); - request_encoder.encodeData(empty_data, true); + upstream_->encodeData(empty_data, true); } } if (buffered_request_body_) { stream_info_.addBytesSent(buffered_request_body_->length()); - request_encoder.encodeData(*buffered_request_body_, encode_complete_ && !encode_trailers_); + upstream_->encodeData(*buffered_request_body_, encode_complete_ && !encode_trailers_); } if (encode_trailers_) { - request_encoder.encodeTrailers(*parent_.downstream_trailers_); + upstream_->encodeTrailers(*parent_.downstreamTrailers()); } if (encode_complete_) { - upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks_->dispatcher().timeSource()); + upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource()); } } } -void UpstreamRequest::setRequestEncoder(Http::RequestEncoder& request_encoder) { - upstream_.reset(new HttpUpstream(*this, &request_encoder)); - // Now that there is an encoder, have the connection manager inform the manager when the - // downstream buffers are overrun. This may result in immediate watermark callbacks referencing - // the encoder. - parent_.callbacks_->addDownstreamWatermarkCallbacks(downstream_watermark_manager_); -} - void UpstreamRequest::clearRequestEncoder() { // Before clearing the encoder, unsubscribe from callbacks. 
if (upstream_) { - parent_.callbacks_->removeDownstreamWatermarkCallbacks(downstream_watermark_manager_); + parent_.callbacks()->removeDownstreamWatermarkCallbacks(downstream_watermark_manager_); } upstream_.reset(); } @@ -417,13 +414,13 @@ void UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermar // downstream connection, or 2) the watermark was hit due to THIS filter // instance writing back the "winning" upstream request. In either case we // can disable reads from upstream. - ASSERT(!parent_.parent_.final_upstream_request_ || - &parent_ == parent_.parent_.final_upstream_request_); + ASSERT(!parent_.parent_.finalUpstreamRequest() || + &parent_ == parent_.parent_.finalUpstreamRequest()); // The downstream connection is overrun. Pause reads from upstream. // If there are multiple calls to readDisable either the codec (H2) or the underlying // Network::Connection (H1) will handle reference counting. - parent_.parent_.cluster_->stats().upstream_flow_control_paused_reading_total_.inc(); + parent_.parent_.cluster()->stats().upstream_flow_control_paused_reading_total_.inc(); parent_.upstream_->readDisable(true); } @@ -432,7 +429,7 @@ void UpstreamRequest::DownstreamWatermarkManager::onBelowWriteBufferLowWatermark // One source of connection blockage has buffer available. Pass this on to the stream, which // will resume reads if this was the last remaining high watermark. - parent_.parent_.cluster_->stats().upstream_flow_control_resumed_reading_total_.inc(); + parent_.parent_.cluster()->stats().upstream_flow_control_resumed_reading_total_.inc(); parent_.upstream_->readDisable(false); } @@ -442,13 +439,13 @@ void UpstreamRequest::disableDataFromDownstreamForFlowControl() { // already seen the full downstream request (downstream_end_stream_) then // disabling reads is a noop. 
// This assert condition must be true because - // parent_.upstream_requests_.size() can only be greater than 1 in the + // parent_.upstreamRequests().size() can only be greater than 1 in the // case of a per-try-timeout with hedge_on_per_try_timeout enabled, and // the per try timeout timer is started only after downstream_end_stream_ // is true. - ASSERT(parent_.upstream_requests_.size() == 1 || parent_.downstream_end_stream_); - parent_.cluster_->stats().upstream_flow_control_backed_up_total_.inc(); - parent_.callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); + ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream()); + parent_.cluster()->stats().upstream_flow_control_backed_up_total_.inc(); + parent_.callbacks()->onDecoderFilterAboveWriteBufferHighWatermark(); } void UpstreamRequest::enableDataFromDownstreamForFlowControl() { @@ -457,13 +454,51 @@ void UpstreamRequest::enableDataFromDownstreamForFlowControl() { // requests. If we've already seen the full downstream request // (downstream_end_stream_) then enabling reads is a noop. // This assert condition must be true because - // parent_.upstream_requests_.size() can only be greater than 1 in the + // parent_.upstreamRequests().size() can only be greater than 1 in the // case of a per-try-timeout with hedge_on_per_try_timeout enabled, and // the per try timeout timer is started only after downstream_end_stream_ // is true. 
- ASSERT(parent_.upstream_requests_.size() == 1 || parent_.downstream_end_stream_); - parent_.cluster_->stats().upstream_flow_control_drained_total_.inc(); - parent_.callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); + ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream()); + parent_.cluster()->stats().upstream_flow_control_drained_total_.inc(); + parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark(); +} + +void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { + callbacks_ = callbacks; + // It's possible for a reset to happen inline within the newStream() call. In this case, we + // might get deleted inline as well. Only write the returned handle out if it is not nullptr to + // deal with this case. + Http::ConnectionPool::Cancellable* handle = + conn_pool_.newStream(*callbacks->upstreamRequest(), *this); + if (handle) { + conn_pool_stream_handle_ = handle; + } +} + +bool HttpConnPool::cancelAnyPendingRequest() { + if (conn_pool_stream_handle_) { + conn_pool_stream_handle_->cancel(); + conn_pool_stream_handle_ = nullptr; + return true; + } + return false; +} + +absl::optional HttpConnPool::protocol() const { return conn_pool_.protocol(); } + +void HttpConnPool::onPoolFailure(Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) { + callbacks_->onPoolFailure(reason, transport_failure_reason, host); +} + +void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) { + conn_pool_stream_handle_ = nullptr; + auto upstream = std::make_unique(*callbacks_->upstreamRequest(), &request_encoder); + callbacks_->onPoolReady(std::move(upstream), host, + request_encoder.getStream().connectionLocalAddress(), info); } } // namespace Router diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h 
index 191da261fb96..0cf36565bed4 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -8,6 +8,7 @@ #include "envoy/http/codec.h" #include "envoy/http/codes.h" +#include "envoy/http/conn_pool.h" #include "envoy/http/filter.h" #include "envoy/stats/scope.h" #include "envoy/tcp/conn_pool.h" @@ -19,22 +20,55 @@ #include "common/common/linked_object.h" #include "common/common/logger.h" #include "common/config/well_known_names.h" -#include "common/router/router.h" #include "common/stream_info/stream_info_impl.h" namespace Envoy { namespace Router { -class Filter; class GenericUpstream; +class GenericConnectionPoolCallbacks; +class RouterFilterInterface; +class UpstreamRequest; + +// An API for wrapping either an HTTP or a TCP connection pool. +class GenericConnPool : public Logger::Loggable { +public: + virtual ~GenericConnPool() = default; + + // Called to create a new HTTP stream or TCP connection. The implementation + // is then responsible for calling either onPoolReady or onPoolFailure on the + // supplied GenericConnectionPoolCallbacks. + virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE; + // Called to cancel a call to newStream. Returns true if a newStream request + // was canceled, false otherwise. + virtual bool cancelAnyPendingRequest() PURE; + // Optionally returns the protocol for the connection pool. + virtual absl::optional protocol() const PURE; +}; + +// An API for the UpstreamRequest to get callbacks from either an HTTP or TCP +// connection pool. 
+class GenericConnectionPoolCallbacks { +public: + virtual ~GenericConnectionPoolCallbacks() = default; + + virtual void onPoolFailure(Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) PURE; + virtual void onPoolReady(std::unique_ptr&& upstream, + Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info) PURE; + virtual UpstreamRequest* upstreamRequest() PURE; +}; // The base request for Upstream. class UpstreamRequest : public Logger::Loggable, public Http::ResponseDecoder, - public Http::ConnectionPool::Callbacks, - public LinkedObject { + public LinkedObject, + public GenericConnectionPoolCallbacks { public: - UpstreamRequest(Filter& parent, Http::ConnectionPool::Instance& pool); + UpstreamRequest(RouterFilterInterface& parent, std::unique_ptr&& conn_pool); ~UpstreamRequest() override; void encodeHeaders(bool end_stream); @@ -62,15 +96,16 @@ class UpstreamRequest : public Logger::Loggable, void disableDataFromDownstreamForFlowControl(); void enableDataFromDownstreamForFlowControl(); - // Http::ConnectionPool::Callbacks + // GenericConnPool void onPoolFailure(Http::ConnectionPool::PoolFailureReason reason, absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) override; - void onPoolReady(Http::RequestEncoder& request_encoder, + void onPoolReady(std::unique_ptr&& upstream, Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, const StreamInfo::StreamInfo& info) override; + UpstreamRequest* upstreamRequest() override { return this; } - void setRequestEncoder(Http::RequestEncoder& request_encoder); void clearRequestEncoder(); struct DownstreamWatermarkManager : public Http::DownstreamWatermarkCallbacks { @@ -105,11 +140,10 @@ class UpstreamRequest : public Logger::Loggable, } 
private: - Filter& parent_; - Http::ConnectionPool::Instance& conn_pool_; + RouterFilterInterface& parent_; + std::unique_ptr conn_pool_; bool grpc_rq_success_deferred_; Event::TimerPtr per_try_timeout_; - Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; std::unique_ptr upstream_; absl::optional deferred_reset_reason_; Buffer::WatermarkBufferPtr buffered_request_body_; @@ -142,6 +176,30 @@ class UpstreamRequest : public Logger::Loggable, bool record_timeout_budget_ : 1; }; +class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { +public: + HttpConnPool(Http::ConnectionPool::Instance& conn_pool) : conn_pool_(conn_pool) {} + + // GenericConnPool + void newStream(GenericConnectionPoolCallbacks* callbacks) override; + bool cancelAnyPendingRequest() override; + absl::optional protocol() const override; + + // Http::ConnectionPool::Callbacks + void onPoolFailure(Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Http::RequestEncoder& callbacks_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) override; + +private: + // Points to the actual connection pool to create streams from. + Http::ConnectionPool::Instance& conn_pool_; + Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; + GenericConnectionPoolCallbacks* callbacks_{}; +}; + // A generic API which covers common functionality between HTTP and TCP upstreams. 
class GenericUpstream { public: diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc index a59f0dcccb51..75f8a9570887 100644 --- a/source/common/router/vhds.cc +++ b/source/common/router/vhds.cc @@ -25,7 +25,8 @@ VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, const std::string& stat_prefix, std::unordered_set& route_config_providers, envoy::config::core::v3::ApiVersion resource_api_version) - : config_update_info_(config_update_info), + : Envoy::Config::SubscriptionBase(resource_api_version), + config_update_info_(config_update_info), scope_(factory_context.scope().createScope(stat_prefix + "vhds." + config_update_info_->routeConfigName() + ".")), stats_({ALL_VHDS_STATS(POOL_COUNTER(*scope_))}), @@ -40,7 +41,7 @@ VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, if (config_source != envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) { throw EnvoyException("vhds: only 'DELTA_GRPC' is supported as an api_type."); } - const auto resource_name = getResourceName(resource_api_version); + const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( config_update_info_->routeConfiguration().vhds().config_source(), diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h index fad39432700a..372f5a08989c 100644 --- a/source/common/router/vhds.h +++ b/source/common/router/vhds.h @@ -7,7 +7,6 @@ #include #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/config/subscription.h" #include "envoy/http/codes.h" @@ -21,6 +20,7 @@ #include "envoy/thread_local/thread_local.h" #include "common/common/logger.h" +#include "common/config/subscription_base.h" #include "common/init/target_impl.h" #include "common/protobuf/utility.h" @@ -53,7 +53,7 @@ class VhdsSubscription : 
Envoy::Config::SubscriptionBase( + rtds_layer.rtds_config().resource_api_version()), + parent_(parent), config_source_(rtds_layer.rtds_config()), store_(store), resource_name_(rtds_layer.name()), init_target_("RTDS " + resource_name_, [this]() { start(); }), validation_visitor_(validation_visitor) {} @@ -583,7 +585,7 @@ void RtdsSubscription::start() { // We have to delay the subscription creation until init-time, since the // cluster manager resources are not available in the constructor when // instantiated in the server instance. - const auto resource_name = getResourceName(config_source_.resource_api_version()); + const auto resource_name = getResourceName(); subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource( config_source_, Grpc::Common::typeUrl(resource_name), store_, *this); subscription_->start({resource_name_}); diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index cc10dd5aa759..7e3443b21daf 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -9,7 +9,6 @@ #include "envoy/common/exception.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/subscription.h" #include "envoy/init/manager.h" #include "envoy/runtime/runtime.h" @@ -24,6 +23,7 @@ #include "common/common/assert.h" #include "common/common/logger.h" #include "common/common/thread.h" +#include "common/config/subscription_base.h" #include "common/init/target_impl.h" #include "common/singleton/threadsafe_singleton.h" diff --git a/source/common/runtime/runtime_protos.h b/source/common/runtime/runtime_protos.h index b57e161387a2..06b0e5816d5a 100644 --- a/source/common/runtime/runtime_protos.h +++ b/source/common/runtime/runtime_protos.h @@ -28,6 +28,21 @@ class FeatureFlag { Runtime::Loader& runtime_; }; +// Helper class for runtime-derived doubles. 
+class Double { +public: + Double(const envoy::config::core::v3::RuntimeDouble& double_proto, Runtime::Loader& runtime) + : runtime_key_(double_proto.runtime_key()), default_value_(double_proto.default_value()), + runtime_(runtime) {} + + double value() const { return runtime_.snapshot().getDouble(runtime_key_, default_value_); } + +private: + const std::string runtime_key_; + const double default_value_; + Runtime::Loader& runtime_; +}; + // Helper class for runtime-derived fractional percent flags. class FractionalPercent { public: diff --git a/source/common/runtime/uuid_util.cc b/source/common/runtime/uuid_util.cc deleted file mode 100644 index 615b5c8ce46d..000000000000 --- a/source/common/runtime/uuid_util.cc +++ /dev/null @@ -1,63 +0,0 @@ -#include "common/runtime/uuid_util.h" - -#include -#include - -#include "common/common/utility.h" -#include "common/runtime/runtime_impl.h" - -namespace Envoy { -bool UuidUtils::uuidModBy(const std::string& uuid, uint64_t& out, uint64_t mod) { - if (uuid.length() < 8) { - return false; - } - - uint64_t value; - if (!StringUtil::atoull(uuid.substr(0, 8).c_str(), value, 16)) { - return false; - } - - out = value % mod; - return true; -} - -UuidTraceStatus UuidUtils::isTraceableUuid(absl::string_view uuid) { - if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { - return UuidTraceStatus::NoTrace; - } - - switch (uuid[TRACE_BYTE_POSITION]) { - case TRACE_FORCED: - return UuidTraceStatus::Forced; - case TRACE_SAMPLED: - return UuidTraceStatus::Sampled; - case TRACE_CLIENT: - return UuidTraceStatus::Client; - default: - return UuidTraceStatus::NoTrace; - } -} - -bool UuidUtils::setTraceableUuid(std::string& uuid, UuidTraceStatus trace_status) { - if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { - return false; - } - - switch (trace_status) { - case UuidTraceStatus::Forced: - uuid[TRACE_BYTE_POSITION] = TRACE_FORCED; - break; - case UuidTraceStatus::Client: - uuid[TRACE_BYTE_POSITION] = TRACE_CLIENT; 
- break; - case UuidTraceStatus::Sampled: - uuid[TRACE_BYTE_POSITION] = TRACE_SAMPLED; - break; - case UuidTraceStatus::NoTrace: - uuid[TRACE_BYTE_POSITION] = NO_TRACE; - break; - } - - return true; -} -} // namespace Envoy diff --git a/source/common/runtime/uuid_util.h b/source/common/runtime/uuid_util.h deleted file mode 100644 index cf2450b4d5e8..000000000000 --- a/source/common/runtime/uuid_util.h +++ /dev/null @@ -1,57 +0,0 @@ -#pragma once - -#include - -#include "absl/strings/string_view.h" - -namespace Envoy { - -enum class UuidTraceStatus { NoTrace, Sampled, Client, Forced }; - -/** - * Utils for uuid4. - */ -class UuidUtils { -public: - /** - * @return bool to indicate if operation succeeded. - * @param uuid uuid4. - * @param out will contain the result of the operation. - * @param mod modulo used in the operation. - */ - static bool uuidModBy(const std::string& uuid, uint64_t& out, uint64_t mod); - - /** - * Modify uuid in a way it can be detected if uuid is traceable or not. - * @param uuid is expected to be well formed uuid4. - * @param trace_status is to specify why we modify uuid. - * @return true on success, false on failure. - */ - static bool setTraceableUuid(std::string& uuid, UuidTraceStatus trace_status); - - /** - * @return status of the uuid, to differentiate reason for tracing, etc. - */ - static UuidTraceStatus isTraceableUuid(absl::string_view uuid); - -private: - // Byte on this position has predefined value of 4 for UUID4. - static const int TRACE_BYTE_POSITION = 14; - - // Value of '9' is chosen randomly to distinguish between freshly generated uuid4 and the - // one modified because we sample trace. - static const char TRACE_SAMPLED = '9'; - - // Value of 'a' is chosen randomly to distinguish between freshly generated uuid4 and the - // one modified because we force trace. 
- static const char TRACE_FORCED = 'a'; - - // Value of 'b' is chosen randomly to distinguish between freshly generated uuid4 and the - // one modified because of client trace. - static const char TRACE_CLIENT = 'b'; - - // Initial value for freshly generated uuid4. - static const char NO_TRACE = '4'; -}; - -} // namespace Envoy diff --git a/source/common/secret/BUILD b/source/common/secret/BUILD index df0e31f0cb5c..719c3e884af9 100644 --- a/source/common/secret/BUILD +++ b/source/common/secret/BUILD @@ -44,7 +44,6 @@ envoy_cc_library( srcs = ["sds_api.cc"], hdrs = ["sds_api.h"], deps = [ - "//include/envoy/config:discovery_service_base_interface", "//include/envoy/config:subscription_factory_interface", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", @@ -57,7 +56,7 @@ envoy_cc_library( "//source/common/common:callback_impl_lib", "//source/common/common:cleanup_lib", "//source/common/config:api_version_lib", - "//source/common/config:resources_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:utility_lib", "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 1a851a6e553b..deab859adafd 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -10,7 +10,6 @@ #include "common/common/assert.h" #include "common/config/api_version.h" -#include "common/config/resources.h" #include "common/protobuf/utility.h" namespace Envoy { @@ -19,13 +18,17 @@ namespace Secret { SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_view sds_config_name, Config::SubscriptionFactory& subscription_factory, TimeSource& time_source, ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, - Init::Manager& init_manager, std::function destructor_cb) - : init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), + 
Init::Manager& init_manager, std::function destructor_cb, + Event::Dispatcher& dispatcher, Api::Api& api) + : Envoy::Config::SubscriptionBase( + sds_config.resource_api_version()), + init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), stats_(stats), sds_config_(std::move(sds_config)), sds_config_name_(sds_config_name), secret_hash_(0), clean_up_(std::move(destructor_cb)), validation_visitor_(validation_visitor), subscription_factory_(subscription_factory), time_source_(time_source), secret_data_{sds_config_name_, "uninitialized", - time_source_.systemTime()} { + time_source_.systemTime()}, + dispatcher_(dispatcher), api_(api) { // TODO(JimmyCYJ): Implement chained_init_manager, so that multiple init_manager // can be chained together to behave as one init_manager. In that way, we let // two listeners which share same SdsApi to register at separate init managers, and @@ -45,12 +48,36 @@ void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& fmt::format("Unexpected SDS secret (expecting {}): {}", sds_config_name_, secret.name())); } - const uint64_t new_hash = MessageUtil::hash(secret); + uint64_t new_hash = MessageUtil::hash(secret); + if (new_hash != secret_hash_) { validateConfig(secret); secret_hash_ = new_hash; setSecret(secret); update_callback_manager_.runCallbacks(); + + // List DataSources that refer to files + auto files = getDataSourceFilenames(); + if (!files.empty()) { + // Create new watch, also destroys the old watch if any. + watcher_ = dispatcher_.createFilesystemWatcher(); + files_hash_ = getHashForFiles(); + for (auto const& filename : files) { + // Watch for directory instead of file. This allows users to do atomic renames + // on directory level (e.g. Kubernetes secret update). 
+ const auto result = api_.fileSystem().splitPathFromFilename(filename); + watcher_->addWatch(absl::StrCat(result.directory_, "/"), + Filesystem::Watcher::Events::MovedTo, [this](uint32_t) { + uint64_t new_hash = getHashForFiles(); + if (new_hash != files_hash_) { + update_callback_manager_.runCallbacks(); + files_hash_ = new_hash; + } + }); + } + } else { + watcher_.reset(); // Destroy the old watch if any + } } secret_data_.last_updated_ = time_source_.systemTime(); secret_data_.version_info_ = version_info; @@ -84,7 +111,7 @@ void SdsApi::validateUpdateSize(int num_resources) { } void SdsApi::initialize() { - const auto resource_name = getResourceName(sds_config_.resource_api_version()); + const auto resource_name = getResourceName(); subscription_ = subscription_factory_.subscriptionFromConfigSource( sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this); subscription_->start({sds_config_name_}); @@ -92,5 +119,43 @@ void SdsApi::initialize() { SdsApi::SecretData SdsApi::secretData() { return secret_data_; } +uint64_t SdsApi::getHashForFiles() { + uint64_t hash = 0; + for (auto const& filename : getDataSourceFilenames()) { + hash = HashUtil::xxHash64(api_.fileSystem().fileReadToEnd(filename), hash); + } + return hash; +} + +std::vector TlsCertificateSdsApi::getDataSourceFilenames() { + std::vector files; + if (tls_certificate_secrets_ && tls_certificate_secrets_->has_certificate_chain() && + tls_certificate_secrets_->certificate_chain().specifier_case() == + envoy::config::core::v3::DataSource::SpecifierCase::kFilename) { + files.push_back(tls_certificate_secrets_->certificate_chain().filename()); + } + if (tls_certificate_secrets_ && tls_certificate_secrets_->has_private_key() && + tls_certificate_secrets_->private_key().specifier_case() == + envoy::config::core::v3::DataSource::SpecifierCase::kFilename) { + files.push_back(tls_certificate_secrets_->private_key().filename()); + } + return files; +} + +std::vector 
CertificateValidationContextSdsApi::getDataSourceFilenames() { + std::vector files; + if (certificate_validation_context_secrets_ && + certificate_validation_context_secrets_->has_trusted_ca() && + certificate_validation_context_secrets_->trusted_ca().specifier_case() == + envoy::config::core::v3::DataSource::SpecifierCase::kFilename) { + files.push_back(certificate_validation_context_secrets_->trusted_ca().filename()); + } + return files; +} + +std::vector TlsSessionTicketKeysSdsApi::getDataSourceFilenames() { return {}; } + +std::vector GenericSecretSdsApi::getDataSourceFilenames() { return {}; } + } // namespace Secret } // namespace Envoy diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index e1648c8f7f91..0ca7c93f24aa 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -4,7 +4,6 @@ #include "envoy/api/api.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" #include "envoy/event/dispatcher.h" @@ -21,6 +20,7 @@ #include "common/common/callback_impl.h" #include "common/common/cleanup.h" +#include "common/config/subscription_base.h" #include "common/config/utility.h" #include "common/init/target_impl.h" #include "common/ssl/certificate_validation_context_config_impl.h" @@ -44,7 +44,8 @@ class SdsApi : public Envoy::Config::SubscriptionBase< SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_view sds_config_name, Config::SubscriptionFactory& subscription_factory, TimeSource& time_source, ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, - Init::Manager& init_manager, std::function destructor_cb); + Init::Manager& init_manager, std::function destructor_cb, + Event::Dispatcher& dispatcher, Api::Api& api); SecretData secretData(); @@ -65,10 +66,13 @@ class SdsApi : public Envoy::Config::SubscriptionBase< return 
MessageUtil::anyConvert(resource) .name(); } + virtual std::vector getDataSourceFilenames() PURE; private: void validateUpdateSize(int num_resources); void initialize(); + uint64_t getHashForFiles(); + Init::TargetImpl init_target_; Stats::Store& stats_; @@ -77,11 +81,15 @@ class SdsApi : public Envoy::Config::SubscriptionBase< const std::string sds_config_name_; uint64_t secret_hash_; + uint64_t files_hash_; Cleanup clean_up_; ProtobufMessage::ValidationVisitor& validation_visitor_; Config::SubscriptionFactory& subscription_factory_; TimeSource& time_source_; SecretData secret_data_; + Event::Dispatcher& dispatcher_; + Api::Api& api_; + std::unique_ptr watcher_; }; class TlsCertificateSdsApi; @@ -110,16 +118,18 @@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb); + *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.api()); } TlsCertificateSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, const std::string& sds_config_name, Config::SubscriptionFactory& subscription_factory, TimeSource& time_source, ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, - Init::Manager& init_manager, std::function destructor_cb) + Init::Manager& init_manager, std::function destructor_cb, + Event::Dispatcher& dispatcher, Api::Api& api) : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor, - stats, init_manager, std::move(destructor_cb)) {} + stats, init_manager, std::move(destructor_cb), dispatcher, api) {} // SecretProvider const envoy::extensions::transport_sockets::tls::v3::TlsCertificate* secret() const override { @@ -144,6 
+154,7 @@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider secret.tls_certificate()); } void validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {} + std::vector getDataSourceFilenames() override; private: TlsCertificatePtr tls_certificate_secrets_; @@ -168,7 +179,8 @@ class CertificateValidationContextSdsApi : public SdsApi, sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb); + *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.api()); } CertificateValidationContextSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, const std::string& sds_config_name, @@ -176,9 +188,10 @@ class CertificateValidationContextSdsApi : public SdsApi, TimeSource& time_source, ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, Init::Manager& init_manager, - std::function destructor_cb) + std::function destructor_cb, + Event::Dispatcher& dispatcher, Api::Api& api) : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor, - stats, init_manager, std::move(destructor_cb)) {} + stats, init_manager, std::move(destructor_cb), dispatcher, api) {} // SecretProvider const envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext* @@ -210,6 +223,7 @@ class CertificateValidationContextSdsApi : public SdsApi, validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override { validation_callback_manager_.runCallbacks(secret.validation_context()); } + std::vector getDataSourceFilenames() override; private: CertificateValidationContextPtr certificate_validation_context_secrets_; @@ -236,7 +250,8 @@ class 
TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb); + *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.api()); } TlsSessionTicketKeysSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -245,9 +260,10 @@ class TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon TimeSource& time_source, ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, Init::Manager& init_manager, - std::function destructor_cb) + std::function destructor_cb, Event::Dispatcher& dispatcher, + Api::Api& api) : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor, - stats, init_manager, std::move(destructor_cb)) {} + stats, init_manager, std::move(destructor_cb), dispatcher, api) {} // SecretProvider const envoy::extensions::transport_sockets::tls::v3::TlsSessionTicketKeys* @@ -280,6 +296,7 @@ class TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override { validation_callback_manager_.runCallbacks(secret.session_ticket_keys()); } + std::vector getDataSourceFilenames() override; private: Secret::TlsSessionTicketKeysPtr tls_session_ticket_keys_; @@ -304,16 +321,18 @@ class GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider { sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), 
destructor_cb); + *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.api()); } GenericSecretSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, const std::string& sds_config_name, Config::SubscriptionFactory& subscription_factory, TimeSource& time_source, ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, - Init::Manager& init_manager, std::function destructor_cb) + Init::Manager& init_manager, std::function destructor_cb, + Event::Dispatcher& dispatcher, Api::Api& api) : SdsApi(sds_config, sds_config_name, subscription_factory, time_source, validation_visitor, - stats, init_manager, std::move(destructor_cb)) {} + stats, init_manager, std::move(destructor_cb), dispatcher, api) {} // SecretProvider const envoy::extensions::transport_sockets::tls::v3::GenericSecret* secret() const override { @@ -337,6 +356,7 @@ class GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider { validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret& secret) override { validation_callback_manager_.runCallbacks(secret.generic_secret()); } + std::vector getDataSourceFilenames() override; private: GenericSecretPtr generic_secret; diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index a22ac822c4a6..f7ab4897596b 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -43,7 +43,8 @@ std::string HistogramStatisticsImpl::quantileSummary() const { const std::vector& supported_quantiles = supportedQuantiles(); summary.reserve(supported_quantiles.size()); for (size_t i = 0; i < supported_quantiles.size(); ++i) { - summary.push_back(fmt::format("P{}: {}", 100 * supported_quantiles[i], computed_quantiles_[i])); + summary.push_back( + fmt::format("P{:g}: {:g}", 100 * supported_quantiles[i], computed_quantiles_[i])); } return absl::StrJoin(summary, ", "); } @@ -53,7 +54,7 @@ 
std::string HistogramStatisticsImpl::bucketSummary() const { const std::vector& supported_buckets = supportedBuckets(); bucket_summary.reserve(supported_buckets.size()); for (size_t i = 0; i < supported_buckets.size(); ++i) { - bucket_summary.push_back(fmt::format("B{}: {}", supported_buckets[i], computed_buckets_[i])); + bucket_summary.push_back(fmt::format("B{:g}: {}", supported_buckets[i], computed_buckets_[i])); } return absl::StrJoin(bucket_summary, ", "); } diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index a3d99fd62498..987617a9fe90 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -144,15 +144,15 @@ class IsolatedStoreImpl : public StoreImpl { return std::vector{}; } - Counter& counter(const std::string& name) override { + Counter& counterFromString(const std::string& name) override { StatNameManagedStorage storage(name, symbolTable()); return counterFromStatName(storage.statName()); } - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override { + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { StatNameManagedStorage storage(name, symbolTable()); return gaugeFromStatName(storage.statName(), import_mode); } - Histogram& histogram(const std::string& name, Histogram::Unit unit) override { + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override { StatNameManagedStorage storage(name, symbolTable()); return histogramFromStatName(storage.statName(), unit); } diff --git a/source/common/stats/scope_prefixer.h b/source/common/stats/scope_prefixer.h index 0c8866643a44..948de23fba2d 100644 --- a/source/common/stats/scope_prefixer.h +++ b/source/common/stats/scope_prefixer.h @@ -25,15 +25,15 @@ class ScopePrefixer : public Scope { Histogram::Unit unit) override; void deliverHistogramToSinks(const Histogram& histograms, uint64_t val) override; - Counter& counter(const 
std::string& name) override { + Counter& counterFromString(const std::string& name) override { StatNameManagedStorage storage(name, symbolTable()); return Scope::counterFromStatName(storage.statName()); } - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override { + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { StatNameManagedStorage storage(name, symbolTable()); return Scope::gaugeFromStatName(storage.statName(), import_mode); } - Histogram& histogram(const std::string& name, Histogram::Unit unit) override { + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override { StatNameManagedStorage storage(name, symbolTable()); return Scope::histogramFromStatName(storage.statName(), unit); } diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 74319c1f5e1e..9121e56739ce 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -301,7 +301,7 @@ class SymbolTableImpl : public SymbolTable { class StatNameStorageBase { public: StatNameStorageBase(SymbolTable::StoragePtr&& bytes) : bytes_(std::move(bytes)) {} - StatNameStorageBase() {} + StatNameStorageBase() = default; /** * @return a reference to the owned storage. 
diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index a94abc513f2b..2d3c9645cd55 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -669,7 +669,7 @@ const std::string ParentHistogramImpl::quantileSummary() const { const std::vector& supported_quantiles_ref = interval_statistics_.supportedQuantiles(); summary.reserve(supported_quantiles_ref.size()); for (size_t i = 0; i < supported_quantiles_ref.size(); ++i) { - summary.push_back(fmt::format("P{}({},{})", 100 * supported_quantiles_ref[i], + summary.push_back(fmt::format("P{:g}({},{})", 100 * supported_quantiles_ref[i], interval_statistics_.computedQuantiles()[i], cumulative_statistics_.computedQuantiles()[i])); } @@ -685,7 +685,7 @@ const std::string ParentHistogramImpl::bucketSummary() const { const std::vector& supported_buckets = interval_statistics_.supportedBuckets(); bucket_summary.reserve(supported_buckets.size()); for (size_t i = 0; i < supported_buckets.size(); ++i) { - bucket_summary.push_back(fmt::format("B{}({},{})", supported_buckets[i], + bucket_summary.push_back(fmt::format("B{:g}({},{})", supported_buckets[i], interval_statistics_.computedBuckets()[i], cumulative_statistics_.computedBuckets()[i])); } diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 5152821d3f48..d503041d242f 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -163,7 +163,9 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo StatNameTagVectorOptConstRef tags) override { return default_scope_->counterFromStatNameWithTags(name, tags); } - Counter& counter(const std::string& name) override { return default_scope_->counter(name); } + Counter& counterFromString(const std::string& name) override { + return default_scope_->counterFromString(name); + } ScopePtr createScope(const std::string& name) override; void 
deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override { return default_scope_->deliverHistogramToSinks(histogram, value); @@ -172,15 +174,15 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Gauge::ImportMode import_mode) override { return default_scope_->gaugeFromStatNameWithTags(name, tags, import_mode); } - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override { - return default_scope_->gauge(name, import_mode); + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { + return default_scope_->gaugeFromString(name, import_mode); } Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags, Histogram::Unit unit) override { return default_scope_->histogramFromStatNameWithTags(name, tags, unit); } - Histogram& histogram(const std::string& name, Histogram::Unit unit) override { - return default_scope_->histogram(name, unit); + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override { + return default_scope_->histogramFromString(name, unit); } NullGaugeImpl& nullGauge(const std::string&) override { return null_gauge_; } const SymbolTable& constSymbolTable() const override { return alloc_.constSymbolTable(); } @@ -301,15 +303,15 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo const SymbolTable& constSymbolTable() const override { return parent_.constSymbolTable(); } SymbolTable& symbolTable() override { return parent_.symbolTable(); } - Counter& counter(const std::string& name) override { + Counter& counterFromString(const std::string& name) override { StatNameManagedStorage storage(name, symbolTable()); return counterFromStatName(storage.statName()); } - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override { + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { StatNameManagedStorage storage(name, symbolTable()); return 
gaugeFromStatName(storage.statName(), import_mode); } - Histogram& histogram(const std::string& name, Histogram::Unit unit) override { + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override { StatNameManagedStorage storage(name, symbolTable()); return histogramFromStatName(storage.statName(), unit); } diff --git a/source/common/stream_info/BUILD b/source/common/stream_info/BUILD index e2c04653a556..9abb095ed978 100644 --- a/source/common/stream_info/BUILD +++ b/source/common/stream_info/BUILD @@ -13,9 +13,11 @@ envoy_cc_library( hdrs = ["stream_info_impl.h"], deps = [ ":filter_state_lib", + "//include/envoy/http:request_id_extension_interface", "//include/envoy/stream_info:stream_info_interface", "//source/common/common:assert_lib", "//source/common/common:dump_state_utils", + "//source/common/http:request_id_extension_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 85f3296cb948..d181153f9a11 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -6,10 +6,12 @@ #include "envoy/common/time.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/http/header_map.h" +#include "envoy/http/request_id_extension.h" #include "envoy/stream_info/stream_info.h" #include "common/common/assert.h" #include "common/common/dump_state_utils.h" +#include "common/http/request_id_extension_impl.h" #include "common/stream_info/filter_state_impl.h" namespace Envoy { @@ -19,12 +21,14 @@ struct StreamInfoImpl : public StreamInfo { StreamInfoImpl(TimeSource& time_source) : time_source_(time_source), start_time_(time_source.systemTime()), start_time_monotonic_(time_source.monotonicTime()), - filter_state_(std::make_shared(FilterState::LifeSpan::FilterChain)) {} + filter_state_(std::make_shared(FilterState::LifeSpan::FilterChain)), + 
request_id_extension_(Http::RequestIDExtensionFactory::noopInstance()) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source) : time_source_(time_source), start_time_(time_source.systemTime()), start_time_monotonic_(time_source.monotonicTime()), protocol_(protocol), - filter_state_(std::make_shared(FilterState::LifeSpan::FilterChain)) {} + filter_state_(std::make_shared(FilterState::LifeSpan::FilterChain)), + request_id_extension_(Http::RequestIDExtensionFactory::noopInstance()) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source, std::shared_ptr& parent_filter_state) @@ -33,7 +37,8 @@ struct StreamInfoImpl : public StreamInfo { filter_state_(std::make_shared( FilterStateImpl::LazyCreateAncestor(parent_filter_state, FilterState::LifeSpan::DownstreamConnection), - FilterState::LifeSpan::FilterChain)) {} + FilterState::LifeSpan::FilterChain)), + request_id_extension_(Http::RequestIDExtensionFactory::noopInstance()) {} SystemTime startTime() const override { return start_time_; } @@ -246,6 +251,13 @@ struct StreamInfoImpl : public StreamInfo { const Http::RequestHeaderMap* getRequestHeaders() const override { return request_headers_; } + void setRequestIDExtension(Http::RequestIDExtensionSharedPtr utils) override { + request_id_extension_ = utils; + } + Http::RequestIDExtensionSharedPtr getRequestIDExtension() const override { + return request_id_extension_; + } + void dumpState(std::ostream& os, int indent_level = 0) const { const char* spaces = spacesForLevel(indent_level); os << spaces << "StreamInfoImpl " << this << DUMP_OPTIONAL_MEMBER(protocol_) @@ -294,6 +306,7 @@ struct StreamInfoImpl : public StreamInfo { Ssl::ConnectionInfoConstSharedPtr upstream_ssl_info_; std::string requested_server_name_; const Http::RequestHeaderMap* request_headers_{}; + Http::RequestIDExtensionSharedPtr request_id_extension_; UpstreamTiming upstream_timing_; std::string upstream_transport_failure_reason_; absl::optional upstream_cluster_info_; diff --git 
a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index 25df04a43c4a..ccd24cb1acf7 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -157,5 +157,14 @@ Utility::formatDownstreamAddressNoPort(const Network::Address::Instance& address } } +const std::string +Utility::formatDownstreamAddressJustPort(const Network::Address::Instance& address) { + std::string port; + if (address.type() == Network::Address::Type::Ip) { + port = std::to_string(address.ip()->port()); + } + return port; +} + } // namespace StreamInfo } // namespace Envoy diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index fa2d0f7befd4..fe8059b89643 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -53,6 +53,13 @@ class Utility { */ static const std::string& formatDownstreamAddressNoPort(const Network::Address::Instance& address); + + /** + * @param address supplies the downstream address. + * @return a port, extracted from the provided downstream address for logs, header expansion, etc. 
+ */ + static const std::string + formatDownstreamAddressJustPort(const Network::Address::Instance& address); }; } // namespace StreamInfo diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index 217b79cfdfbb..a80e8e05a044 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -31,6 +31,7 @@ class ConnPoolImpl : Logger::Loggable, public ConnectionPool:: void addDrainedCallback(DrainedCb cb) override; void drainConnections() override; ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } protected: struct ActiveConn; diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 12fb538cd4b2..dafba6aa0ea2 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -1,6 +1,7 @@ #include "common/tcp_proxy/tcp_proxy.h" #include +#include #include #include "envoy/buffer/buffer.h" @@ -229,8 +230,6 @@ Filter::Filter(ConfigSharedPtr config, Upstream::ClusterManager& cluster_manager } Filter::~Filter() { - getStreamInfo().onRequestComplete(); - for (const auto& access_log : config_->accessLogs()) { access_log->log(nullptr, nullptr, nullptr, getStreamInfo()); } @@ -253,11 +252,6 @@ void Filter::initialize(Network::ReadFilterCallbacks& callbacks, bool set_connec read_callbacks_->connection().addConnectionCallbacks(downstream_callbacks_); read_callbacks_->connection().enableHalfClose(true); - getStreamInfo().setDownstreamLocalAddress(read_callbacks_->connection().localAddress()); - getStreamInfo().setDownstreamRemoteAddress(read_callbacks_->connection().remoteAddress()); - getStreamInfo().setDownstreamDirectRemoteAddress( - read_callbacks_->connection().directRemoteAddress()); - getStreamInfo().setDownstreamSslConnection(read_callbacks_->connection().ssl()); // Need to disable reads so that we don't write to an upstream that might fail // in 
onData(). This will get re-enabled when the upstream connection is @@ -445,7 +439,7 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { Tcp::ConnectionPool::Cancellable* handle = conn_pool->newConnection(*this); if (handle) { ASSERT(upstream_handle_.get() == nullptr); - upstream_handle_.reset(new TcpConnectionHandle(handle)); + upstream_handle_ = std::make_shared(handle); } // Because we never return open connections to the pool, this either has a handle waiting on // connection completion, or onPoolFailure has been invoked. Either way, stop iteration. @@ -455,11 +449,11 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { Http::ConnectionPool::Instance* conn_pool = cluster_manager_.httpConnPoolForCluster( cluster_name, Upstream::ResourcePriority::Default, Http::Protocol::Http2, this); if (conn_pool) { - upstream_.reset( - new HttpUpstream(*upstream_callbacks_, config_->tunnelingConfig()->hostname())); + upstream_ = std::make_unique(*upstream_callbacks_, + config_->tunnelingConfig()->hostname()); HttpUpstream* http_upstream = static_cast(upstream_.get()); - upstream_handle_.reset( - new HttpConnectionHandle(conn_pool->newStream(http_upstream->responseDecoder(), *this))); + upstream_handle_ = std::make_shared( + conn_pool->newStream(http_upstream->responseDecoder(), *this)); return Network::FilterStatus::StopIteration; } } @@ -513,7 +507,7 @@ void Filter::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, Upstream::HostDescriptionConstSharedPtr host) { Tcp::ConnectionPool::ConnectionData* latched_data = conn_data.get(); - upstream_.reset(new TcpUpstream(std::move(conn_data), *upstream_callbacks_)); + upstream_ = std::make_unique(std::move(conn_data), *upstream_callbacks_); onPoolReadyBase(host, latched_data->connection().localAddress(), latched_data->connection().streamInfo().downstreamSslConnection()); read_callbacks_->connection().streamInfo().setUpstreamFilterState( @@ -550,7 +544,6 @@ void Filter::onConnectTimeout() { 
Network::FilterStatus Filter::onData(Buffer::Instance& data, bool end_stream) { ENVOY_CONN_LOG(trace, "downstream connection received {} bytes, end_stream={}", read_callbacks_->connection(), data.length(), end_stream); - getStreamInfo().addBytesReceived(data.length()); if (upstream_) { upstream_->encodeData(data, end_stream); } @@ -589,7 +582,6 @@ void Filter::onDownstreamEvent(Network::ConnectionEvent event) { void Filter::onUpstreamData(Buffer::Instance& data, bool end_stream) { ENVOY_CONN_LOG(trace, "upstream connection received {} bytes, end_stream={}", read_callbacks_->connection(), data.length(), end_stream); - getStreamInfo().addBytesSent(data.length()); read_callbacks_->connection().write(data, end_stream); ASSERT(0 == data.length()); resetIdleTimer(); // TODO(ggreenway) PERF: do we need to reset timer on both send and receive? diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index c83c2bd3245b..db193b0f4bba 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -12,7 +12,7 @@ namespace TcpProxy { // or an HttpConnectionHandle class ConnectionHandle { public: - virtual ~ConnectionHandle() {} + virtual ~ConnectionHandle() = default; // Cancel the conn pool request and close any excess pending requests. virtual void cancel() PURE; }; @@ -43,7 +43,7 @@ class HttpConnectionHandle : public ConnectionHandle { // upstream. class GenericUpstream { public: - virtual ~GenericUpstream() {} + virtual ~GenericUpstream() = default; // Calls readDisable on the upstream connection. Returns false if readDisable could not be // performed (e.g. 
if the connection is closed) virtual bool readDisable(bool disable) PURE; @@ -75,7 +75,7 @@ class TcpUpstream : public GenericUpstream { class HttpUpstream : public GenericUpstream, Http::StreamCallbacks { public: HttpUpstream(Tcp::ConnectionPool::UpstreamCallbacks& callbacks, const std::string& hostname); - ~HttpUpstream(); + ~HttpUpstream() override; static bool isValidBytestreamResponse(const Http::ResponseHeaderMap& headers); diff --git a/source/common/tracing/BUILD b/source/common/tracing/BUILD index 51a74a87d441..d516876313b5 100644 --- a/source/common/tracing/BUILD +++ b/source/common/tracing/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( "http_tracer_impl.h", ], deps = [ + "//include/envoy/http:request_id_extension_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/thread_local:thread_local_interface", @@ -36,7 +37,6 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/common/json:json_loader_lib", "//source/common/protobuf:utility_lib", - "//source/common/runtime:uuid_util_lib", "//source/common/stream_info:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/type/metadata/v3:pkg_cc_proto", diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index d9fe662ec69d..f3f568666f60 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -18,7 +18,6 @@ #include "common/http/headers.h" #include "common/http/utility.h" #include "common/protobuf/utility.h" -#include "common/runtime/uuid_util.h" #include "common/stream_info/utility.h" #include "absl/strings/str_cat.h" @@ -70,21 +69,17 @@ Decision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info, return {Reason::HealthCheck, false}; } - if (!request_headers.RequestId()) { - return {Reason::NotTraceableRequestId, false}; - } - - UuidTraceStatus trace_status = - 
UuidUtils::isTraceableUuid(request_headers.RequestId()->value().getStringView()); + Http::TraceStatus trace_status = + stream_info.getRequestIDExtension()->getTraceStatus(request_headers); switch (trace_status) { - case UuidTraceStatus::Client: + case Http::TraceStatus::Client: return {Reason::ClientForced, true}; - case UuidTraceStatus::Forced: + case Http::TraceStatus::Forced: return {Reason::ServiceForced, true}; - case UuidTraceStatus::Sampled: + case Http::TraceStatus::Sampled: return {Reason::Sampling, true}; - case UuidTraceStatus::NoTrace: + case Http::TraceStatus::NoTrace: return {Reason::NotTraceableRequestId, false}; } diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h index 109290707859..14cb47cbeb6a 100644 --- a/source/common/tracing/http_tracer_impl.h +++ b/source/common/tracing/http_tracer_impl.h @@ -4,6 +4,7 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/http/request_id_extension.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" #include "envoy/thread_local/thread_local.h" diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index f4af28441a57..608c24bbfb3f 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -13,14 +13,13 @@ envoy_cc_library( srcs = ["cds_api_impl.cc"], hdrs = ["cds_api_impl.h"], deps = [ - "//include/envoy/config:discovery_service_base_interface", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/local_info:local_info_interface", "//source/common/common:cleanup_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", - "//source/common/config:resources_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:utility_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", @@ -75,6 +74,15 @@ envoy_cc_library( 
], ) +envoy_cc_library( + name = "cluster_update_tracker_lib", + srcs = ["cluster_update_tracker.cc"], + hdrs = ["cluster_update_tracker.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + envoy_cc_library( name = "conn_pool_map", hdrs = ["conn_pool_map.h"], @@ -331,6 +339,9 @@ envoy_cc_library( name = "ring_hash_lb_lib", srcs = ["ring_hash_lb.cc"], hdrs = ["ring_hash_lb.h"], + external_deps = [ + "abseil_inlined_vector", + ], deps = [ ":thread_aware_lb_lib", "//source/common/common:minimal_logger_lib", @@ -345,7 +356,6 @@ envoy_cc_library( deps = [ ":cluster_factory_lib", ":upstream_includes", - "//include/envoy/config:discovery_service_base_interface", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_factory_interface", "//include/envoy/config:subscription_interface", @@ -355,6 +365,7 @@ envoy_cc_library( "//include/envoy/upstream:locality_lib", "//source/common/config:api_version_lib", "//source/common/config:metadata_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", "//source/common/config:version_converter_lib", diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index d7cf1534f6b5..2e83937b9877 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -30,9 +30,11 @@ CdsApiPtr CdsApiImpl::create(const envoy::config::core::v3::ConfigSource& cds_co CdsApiImpl::CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor) - : cm_(cm), scope_(scope.createScope("cluster_manager.cds.")), + : Envoy::Config::SubscriptionBase( + cds_config.resource_api_version()), + cm_(cm), scope_(scope.createScope("cluster_manager.cds.")), validation_visitor_(validation_visitor) { - const auto resource_name = 
getResourceName(cds_config.resource_api_version()); + const auto resource_name = getResourceName(); subscription_ = cm_.subscriptionFactory().subscriptionFromConfigSource( cds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this); } @@ -129,4 +131,4 @@ void CdsApiImpl::runInitializeCallbackIfAny() { } } // namespace Upstream -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index 5eca7076befe..f2f66340e9b0 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -5,7 +5,6 @@ #include "envoy/api/api.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/local_info/local_info.h" @@ -14,6 +13,7 @@ #include "envoy/upstream/cluster_manager.h" #include "common/common/logger.h" +#include "common/config/subscription_base.h" namespace Envoy { namespace Upstream { diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 9f6bbcdd84e0..8233ae6ac6be 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -57,6 +57,14 @@ std::pair ClusterFactoryImplBase:: } else { cluster_type = cluster.cluster_type().name(); } + + if (cluster.common_lb_config().has_consistent_hashing_lb_config() && + cluster.common_lb_config().consistent_hashing_lb_config().use_hostname_for_hashing() && + cluster.type() != envoy::config::cluster::v3::Cluster::STRICT_DNS) { + throw EnvoyException(fmt::format( + "Cannot use hostname for consistent hashing loadbalancing for cluster of type: '{}'", + cluster_type)); + } ClusterFactory* factory = Registry::FactoryRegistry::getFactory(cluster_type); if (factory == nullptr) { diff --git 
a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index aa5f724446f6..8fc470ae447b 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -225,8 +225,8 @@ ClusterManagerImpl::ClusterManagerImpl( if (cm_config.has_outlier_detection()) { const std::string event_log_file_path = cm_config.outlier_detection().event_log_path(); if (!event_log_file_path.empty()) { - outlier_event_logger_.reset( - new Outlier::EventLoggerImpl(log_manager, event_log_file_path, time_source_)); + outlier_event_logger_ = std::make_shared( + log_manager, event_log_file_path, time_source_); } } @@ -268,7 +268,13 @@ ClusterManagerImpl::ClusterManagerImpl( ->create(), main_thread_dispatcher, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.AggregatedDiscoveryService.DeltaAggregatedResources"), + dyn_resources.ads_config().transport_api_version() == + envoy::config::core::v3::ApiVersion::V3 + // TODO(htuch): consolidate with type_to_endpoint.cc, once we sort out the future + // direction of that module re: https://github.com/envoyproxy/envoy/issues/10650. + ? "envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources" + : "envoy.service.discovery.v2.AggregatedDiscoveryService." + "DeltaAggregatedResources"), dyn_resources.ads_config().transport_api_version(), random_, stats_, Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info); } else { @@ -281,6 +287,8 @@ ClusterManagerImpl::ClusterManagerImpl( *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( dyn_resources.ads_config().transport_api_version() == envoy::config::core::v3::ApiVersion::V3 + // TODO(htuch): consolidate with type_to_endpoint.cc, once we sort out the future + // direction of that module re: https://github.com/envoyproxy/envoy/issues/10650. ? "envoy.service.discovery.v3.AggregatedDiscoveryService." 
"StreamAggregatedResources" : "envoy.service.discovery.v2.AggregatedDiscoveryService." diff --git a/source/common/upstream/cluster_update_tracker.cc b/source/common/upstream/cluster_update_tracker.cc new file mode 100644 index 000000000000..42ee974ee0ae --- /dev/null +++ b/source/common/upstream/cluster_update_tracker.cc @@ -0,0 +1,28 @@ +#include "common/upstream/cluster_update_tracker.h" + +namespace Envoy { +namespace Upstream { + +ClusterUpdateTracker::ClusterUpdateTracker(ClusterManager& cm, const std::string& cluster_name) + : cluster_name_(cluster_name), + cluster_update_callbacks_handle_(cm.addThreadLocalClusterUpdateCallbacks(*this)) { + Upstream::ThreadLocalCluster* cluster = cm.get(cluster_name_); + cluster_info_ = cluster ? cluster->info() : nullptr; +} + +void ClusterUpdateTracker::onClusterAddOrUpdate(ThreadLocalCluster& cluster) { + if (cluster.info()->name() != cluster_name_) { + return; + } + cluster_info_ = cluster.info(); +} + +void ClusterUpdateTracker::onClusterRemoval(const std::string& cluster) { + if (cluster != cluster_name_) { + return; + } + cluster_info_.reset(); +} + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/cluster_update_tracker.h b/source/common/upstream/cluster_update_tracker.h new file mode 100644 index 000000000000..b55d1b0d5483 --- /dev/null +++ b/source/common/upstream/cluster_update_tracker.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +namespace Envoy { +namespace Upstream { + +/** + * Keeps track of cluster updates in order to spot addition and removal. + * + * Use this class as a performance optimization to avoid going through ClusterManager::get() + * on the hot path. 
+ */ +class ClusterUpdateTracker : public ClusterUpdateCallbacks { +public: + ClusterUpdateTracker(ClusterManager& cm, const std::string& cluster_name); + + bool exists() { return cluster_info_ != nullptr; } + ClusterInfoConstSharedPtr info() { return cluster_info_; } + + // ClusterUpdateCallbacks + void onClusterAddOrUpdate(ThreadLocalCluster& cluster) override; + void onClusterRemoval(const std::string& cluster) override; + +private: + const std::string cluster_name_; + const ClusterUpdateCallbacksHandlePtr cluster_update_callbacks_handle_; + + ClusterInfoConstSharedPtr cluster_info_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 81e2a8deb11f..03a12914cfa8 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -22,6 +22,8 @@ EdsClusterImpl::EdsClusterImpl( Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), + Envoy::Config::SubscriptionBase( + cluster.eds_cluster_config().eds_config().resource_api_version()), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() ? 
cluster.name() @@ -36,8 +38,7 @@ EdsClusterImpl::EdsClusterImpl( } else { initialize_phase_ = InitializePhase::Secondary; } - const auto resource_name = - getResourceName(cluster.eds_cluster_config().eds_config().resource_api_version()); + const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( eds_config, Grpc::Common::typeUrl(resource_name), info_->statsScope(), *this); @@ -55,7 +56,8 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { priority_state_manager.registerHostForPriority( - "", parent_.resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, + lb_endpoint.endpoint().hostname(), + parent_.resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, lb_endpoint); } } diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index ebe8dc375523..fa3b09eb8cca 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -3,7 +3,6 @@ #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" @@ -13,6 +12,7 @@ #include "envoy/stats/scope.h" #include "envoy/upstream/locality.h" +#include "common/config/subscription_base.h" #include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index d12be6acde91..70bb4fd9493f 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -1,5 +1,7 @@ #include "common/upstream/health_checker_impl.h" +#include + 
#include "envoy/config/core/v3/health_check.pb.h" #include "envoy/data/core/v3/health_check_event.pb.h" #include "envoy/server/health_checker_config.h" @@ -25,6 +27,33 @@ namespace Envoy { namespace Upstream { +namespace { + +// Helper functions to get the correct hostname for an L7 health check. +const std::string& getHostname(const HostSharedPtr& host, const std::string& config_hostname, + const ClusterInfoConstSharedPtr& cluster) { + if (!host->hostnameForHealthChecks().empty()) { + return host->hostnameForHealthChecks(); + } + + if (!config_hostname.empty()) { + return config_hostname; + } + + return cluster->name(); +} + +const std::string& getHostname(const HostSharedPtr& host, + const absl::optional& config_hostname, + const ClusterInfoConstSharedPtr& cluster) { + if (config_hostname.has_value()) { + return getHostname(host, config_hostname.value(), cluster); + } + return getHostname(host, EMPTY_STRING, cluster); +} + +} // namespace + class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthCheckerFactoryContext { public: HealthCheckerFactoryContextImpl(Upstream::Cluster& cluster, Envoy::Runtime::Loader& runtime, @@ -181,8 +210,7 @@ Http::Protocol codecClientTypeToProtocol(Http::CodecClient::Type codec_client_ty HttpHealthCheckerImpl::HttpActiveHealthCheckSession::HttpActiveHealthCheckSession( HttpHealthCheckerImpl& parent, const HostSharedPtr& host) : ActiveHealthCheckSession(parent, host), parent_(parent), - hostname_(parent_.host_value_.empty() ? 
parent_.cluster_.info()->name() - : parent_.host_value_), + hostname_(getHostname(host, parent_.host_value_, parent_.cluster_.info())), protocol_(codecClientTypeToProtocol(parent_.codec_client_type_)), local_address_(std::make_shared("127.0.0.1")) {} @@ -479,7 +507,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { client_ = host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions()) .connection_; - session_callbacks_.reset(new TcpSessionCallbacks(*this)); + session_callbacks_ = std::make_shared(*this); client_->addConnectionCallbacks(*session_callbacks_); client_->addReadFilter(session_callbacks_); @@ -640,9 +668,8 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() { request_encoder_ = &client_->newStream(*this); request_encoder_->getStream().addCallbacks(*this); - const std::string& authority = parent_.authority_value_.has_value() - ? parent_.authority_value_.value() - : parent_.cluster_.info()->name(); + const std::string& authority = + getHostname(host_, parent_.authority_value_, parent_.cluster_.info()); auto headers_message = Grpc::Common::prepareHeaders(authority, parent_.service_method_.service()->full_name(), parent_.service_method_.name(), absl::nullopt); diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index 88f3cc5aba99..709e801fb34a 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -120,8 +121,8 @@ void LogicalDnsCluster::startResolve() { Network::Utility::portFromTcpUrl(dns_url_)); if (!logical_host_) { - logical_host_.reset(new LogicalHost(info_, hostname_, new_address, localityLbEndpoint(), - lbEndpoint(), nullptr)); + logical_host_ = std::make_shared( + info_, hostname_, new_address, localityLbEndpoint(), lbEndpoint(), nullptr); const auto& locality_lb_endpoint = localityLbEndpoint(); 
PriorityStateManager priority_state_manager(*this, local_info_, nullptr); diff --git a/source/common/upstream/logical_host.h b/source/common/upstream/logical_host.h index 77b7b12cd745..3e296b7e7548 100644 --- a/source/common/upstream/logical_host.h +++ b/source/common/upstream/logical_host.h @@ -88,6 +88,9 @@ class RealHostDescription : public HostDescription { return logical_host_->outlierDetector(); } HostStats& stats() const override { return logical_host_->stats(); } + const std::string& hostnameForHealthChecks() const override { + return logical_host_->hostnameForHealthChecks(); + } const std::string& hostname() const override { return logical_host_->hostname(); } Network::Address::InstanceConstSharedPtr address() const override { return address_; } const envoy::config::core::v3::Locality& locality() const override { diff --git a/source/common/upstream/maglev_lb.cc b/source/common/upstream/maglev_lb.cc index ec6faf679f7e..c7e454a97ea7 100644 --- a/source/common/upstream/maglev_lb.cc +++ b/source/common/upstream/maglev_lb.cc @@ -7,7 +7,7 @@ namespace Upstream { MaglevTable::MaglevTable(const NormalizedHostWeightVector& normalized_host_weights, double max_normalized_weight, uint64_t table_size, - MaglevLoadBalancerStats& stats) + bool use_hostname_for_hashing, MaglevLoadBalancerStats& stats) : table_size_(table_size), stats_(stats) { // TODO(mattklein123): The Maglev table must have a size that is a prime number for the algorithm // to work. Currently, the table size is not user configurable. In the future, if the table size @@ -26,7 +26,9 @@ MaglevTable::MaglevTable(const NormalizedHostWeightVector& normalized_host_weigh table_build_entries.reserve(normalized_host_weights.size()); for (const auto& host_weight : normalized_host_weights) { const auto& host = host_weight.first; - const std::string& address = host->address()->asString(); + const std::string& address = + use_hostname_for_hashing ? 
host->hostname() : host->address()->asString(); + ASSERT(!address.empty()); table_build_entries.emplace_back(host, HashUtil::xxHash64(address) % table_size_, (HashUtil::xxHash64(address, 1) % (table_size_ - 1)) + 1, host_weight.second); @@ -71,16 +73,25 @@ MaglevTable::MaglevTable(const NormalizedHostWeightVector& normalized_host_weigh if (ENVOY_LOG_CHECK_LEVEL(trace)) { for (uint64_t i = 0; i < table_.size(); i++) { - ENVOY_LOG(trace, "maglev: i={} host={}", i, table_[i]->address()->asString()); + ENVOY_LOG(trace, "maglev: i={} host={}", i, + use_hostname_for_hashing ? table_[i]->hostname() + : table_[i]->address()->asString()); } } } -HostConstSharedPtr MaglevTable::chooseHost(uint64_t hash) const { +HostConstSharedPtr MaglevTable::chooseHost(uint64_t hash, uint32_t attempt) const { if (table_.empty()) { return nullptr; } + if (attempt > 0) { + // If a retry host predicate is being applied, mutate the hash to choose an alternate host. + // By using value with most bits set for the retry attempts, we achieve a larger change in + // the hash, thereby reducing the likelihood that all retries are directed to a single host. + hash ^= ~0ULL - attempt + 1; + } + return table_[hash % table_size_]; } @@ -94,7 +105,11 @@ MaglevLoadBalancer::MaglevLoadBalancer( const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, uint64_t table_size) : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), scope_(scope.createScope("maglev_lb.")), stats_(generateStats(*scope_)), - table_size_(table_size) {} + table_size_(table_size), + use_hostname_for_hashing_( + common_config.has_consistent_hashing_lb_config() + ? 
common_config.consistent_hashing_lb_config().use_hostname_for_hashing() + : false) {} MaglevLoadBalancerStats MaglevLoadBalancer::generateStats(Stats::Scope& scope) { return {ALL_MAGLEV_LOAD_BALANCER_STATS(POOL_GAUGE(scope))}; diff --git a/source/common/upstream/maglev_lb.h b/source/common/upstream/maglev_lb.h index 450c85536bd5..12a71e4fcb2d 100644 --- a/source/common/upstream/maglev_lb.h +++ b/source/common/upstream/maglev_lb.h @@ -34,10 +34,11 @@ class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer, Logger::Loggable { public: MaglevTable(const NormalizedHostWeightVector& normalized_host_weights, - double max_normalized_weight, uint64_t table_size, MaglevLoadBalancerStats& stats); + double max_normalized_weight, uint64_t table_size, bool use_hostname_for_hashing, + MaglevLoadBalancerStats& stats); // ThreadAwareLoadBalancerBase::HashingLoadBalancer - HostConstSharedPtr chooseHost(uint64_t hash) const override; + HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const override; // Recommended table size in section 5.3 of the paper. 
static const uint64_t DefaultTableSize = 65537; @@ -81,7 +82,7 @@ class MaglevLoadBalancer : public ThreadAwareLoadBalancerBase { createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights, double /* min_normalized_weight */, double max_normalized_weight) override { return std::make_shared(normalized_host_weights, max_normalized_weight, - table_size_, stats_); + table_size_, use_hostname_for_hashing_, stats_); } static MaglevLoadBalancerStats generateStats(Stats::Scope& scope); @@ -89,6 +90,7 @@ class MaglevLoadBalancer : public ThreadAwareLoadBalancerBase { Stats::ScopePtr scope_; MaglevLoadBalancerStats stats_; const uint64_t table_size_; + const bool use_hostname_for_hashing_; }; } // namespace Upstream diff --git a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index f018d1c9c3c4..7953d55a4bae 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -10,6 +10,7 @@ #include "common/common/assert.h" #include "common/upstream/load_balancer_impl.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -29,9 +30,13 @@ RingHashLoadBalancer::RingHashLoadBalancer( DefaultMaxRingSize) : DefaultMaxRingSize), hash_function_(config ? config.value().hash_function() - : HashFunction::Cluster_RingHashLbConfig_HashFunction_XX_HASH) { - // It's important to do any config validation here, rather than deferring to Ring's ctor, because - // any exceptions thrown here will be caught and handled properly. + : HashFunction::Cluster_RingHashLbConfig_HashFunction_XX_HASH), + use_hostname_for_hashing_( + common_config.has_consistent_hashing_lb_config() + ? common_config.consistent_hashing_lb_config().use_hostname_for_hashing() + : false) { + // It's important to do any config validation here, rather than deferring to Ring's ctor, + // because any exceptions thrown here will be caught and handled properly. 
if (min_ring_size_ > max_ring_size_) { throw EnvoyException(fmt::format("ring hash: minimum_ring_size ({}) > maximum_ring_size ({})", min_ring_size_, max_ring_size_)); @@ -42,7 +47,7 @@ RingHashLoadBalancerStats RingHashLoadBalancer::generateStats(Stats::Scope& scop return {ALL_RING_HASH_LOAD_BALANCER_STATS(POOL_GAUGE(scope))}; } -HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h) const { +HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h, uint32_t attempt) const { if (ring_.empty()) { return nullptr; } @@ -53,18 +58,20 @@ HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h) const { // change them! int64_t lowp = 0; int64_t highp = ring_.size(); + int64_t midp = 0; while (true) { - int64_t midp = (lowp + highp) / 2; + midp = (lowp + highp) / 2; if (midp == static_cast(ring_.size())) { - return ring_[0].host_; + midp = 0; + break; } uint64_t midval = ring_[midp].hash_; uint64_t midval1 = midp == 0 ? 0 : ring_[midp - 1].hash_; if (h <= midval && h > midval1) { - return ring_[midp].host_; + break; } if (midval < h) { @@ -74,16 +81,26 @@ HostConstSharedPtr RingHashLoadBalancer::Ring::chooseHost(uint64_t h) const { } if (lowp > highp) { - return ring_[0].host_; + midp = 0; + break; } } + + // If a retry host predicate is being applied, behave as if this host was not in the ring. + // Note that this does not guarantee a different host: e.g., attempt == ring_.size() or + // when the offset causes us to select the same host at another location in the ring. 
+ if (attempt > 0) { + midp = (midp + attempt) % ring_.size(); + } + + return ring_[midp].host_; } using HashFunction = envoy::config::cluster::v3::Cluster::RingHashLbConfig::HashFunction; RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_host_weights, double min_normalized_weight, uint64_t min_ring_size, uint64_t max_ring_size, HashFunction hash_function, - RingHashLoadBalancerStats& stats) + bool use_hostname_for_hashing, RingHashLoadBalancerStats& stats) : stats_(stats) { ENVOY_LOG(trace, "ring hash: building ring"); @@ -95,8 +112,8 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho // Scale up the number of hashes per host such that the least-weighted host gets a whole number // of hashes on the ring. Other hosts might not end up with whole numbers, and that's fine (the // ring-building algorithm below can handle this). This preserves the original implementation's - // behavior: when weights aren't provided, all hosts should get an equal number of hashes. In the - // case where this number exceeds the max_ring_size, it's scaled back down to fit. + // behavior: when weights aren't provided, all hosts should get an equal number of hashes. In + // the case where this number exceeds the max_ring_size, it's scaled back down to fit. const double scale = std::min(std::ceil(min_normalized_weight * min_ring_size) / min_normalized_weight, static_cast(max_ring_size)); @@ -105,10 +122,10 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho const uint64_t ring_size = std::ceil(scale); ring_.reserve(ring_size); - // Populate the hash ring by walking through the (host, weight) pairs in normalized_host_weights, - // and generating (scale * weight) hashes for each host. Since these aren't necessarily whole - // numbers, we maintain running sums -- current_hashes and target_hashes -- which allows us to - // populate the ring in a mostly stable way. 
+ // Populate the hash ring by walking through the (host, weight) pairs in + // normalized_host_weights, and generating (scale * weight) hashes for each host. Since these + // aren't necessarily whole numbers, we maintain running sums -- current_hashes and + // target_hashes -- which allows us to populate the ring in a mostly stable way. // // For example, suppose we have 4 hosts, each with a normalized weight of 0.25, and a scale of // 6.0 (because the max_ring_size is 6). That means we want to generate 1.5 hashes per host. @@ -122,37 +139,32 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho // For stats reporting, keep track of the minimum and maximum actual number of hashes per host. // Users should hopefully pay attention to these numbers and alert if min_hashes_per_host is too // low, since that implies an inaccurate request distribution. - char hash_key_buffer[196]; + + absl::InlinedVector hash_key_buffer; double current_hashes = 0.0; double target_hashes = 0.0; uint64_t min_hashes_per_host = ring_size; uint64_t max_hashes_per_host = 0; for (const auto& entry : normalized_host_weights) { const auto& host = entry.first; - const std::string& address_string = host->address()->asString(); - uint64_t offset_start = address_string.size(); - - // Currently, we support both IP and UDS addresses. The UDS max path length is ~108 on all Unix - // platforms that I know of. Given that, we can use a 196 char buffer which is plenty of room - // for UDS, '_', and up to 21 characters for the node ID. To be on the super safe side, there - // is a RELEASE_ASSERT here that checks this, in case someone in the future adds some type of - // new address that is larger, or runs on a platform where UDS is larger. I don't think it's - // worth the defensive coding to deal with the heap allocation case (e.g. via - // absl::InlinedVector) at the current time. 
- RELEASE_ASSERT( - address_string.size() + 1 + StringUtil::MIN_ITOA_OUT_LEN <= sizeof(hash_key_buffer), ""); - memcpy(hash_key_buffer, address_string.c_str(), offset_start); - hash_key_buffer[offset_start++] = '_'; + const std::string& address_string = + use_hostname_for_hashing ? host->hostname() : host->address()->asString(); + ASSERT(!address_string.empty()); + + hash_key_buffer.assign(address_string.begin(), address_string.end()); + hash_key_buffer.emplace_back('_'); + auto offset_start = hash_key_buffer.end(); // As noted above: maintain current_hashes and target_hashes as running sums across the entire // host set. `i` is needed only to construct the hash key, and tally min/max hashes per host. target_hashes += scale * entry.second; uint64_t i = 0; while (current_hashes < target_hashes) { - const uint64_t total_hash_key_len = - offset_start + - StringUtil::itoa(hash_key_buffer + offset_start, StringUtil::MIN_ITOA_OUT_LEN, i); - absl::string_view hash_key(hash_key_buffer, total_hash_key_len); + const std::string i_str = absl::StrCat("", i); + hash_key_buffer.insert(offset_start, i_str.begin(), i_str.end()); + + absl::string_view hash_key(static_cast(hash_key_buffer.data()), + hash_key_buffer.size()); const uint64_t hash = (hash_function == HashFunction::Cluster_RingHashLbConfig_HashFunction_MURMUR_HASH_2) @@ -163,6 +175,7 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho ring_.push_back({hash, host}); ++i; ++current_hashes; + hash_key_buffer.erase(offset_start, hash_key_buffer.end()); } min_hashes_per_host = std::min(i, min_hashes_per_host); max_hashes_per_host = std::max(i, max_hashes_per_host); @@ -173,7 +186,9 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho }); if (ENVOY_LOG_CHECK_LEVEL(trace)) { for (const auto& entry : ring_) { - ENVOY_LOG(trace, "ring hash: host={} hash={}", entry.host_->address()->asString(), + ENVOY_LOG(trace, "ring hash: host={} hash={}", + use_hostname_for_hashing 
? entry.host_->hostname() + : entry.host_->address()->asString(), entry.hash_); } } diff --git a/source/common/upstream/ring_hash_lb.h b/source/common/upstream/ring_hash_lb.h index dd814a753cc7..9353d34715a7 100644 --- a/source/common/upstream/ring_hash_lb.h +++ b/source/common/upstream/ring_hash_lb.h @@ -59,10 +59,10 @@ class RingHashLoadBalancer : public ThreadAwareLoadBalancerBase, struct Ring : public HashingLoadBalancer { Ring(const NormalizedHostWeightVector& normalized_host_weights, double min_normalized_weight, uint64_t min_ring_size, uint64_t max_ring_size, HashFunction hash_function, - RingHashLoadBalancerStats& stats); + bool use_hostname_for_hashing, RingHashLoadBalancerStats& stats); // ThreadAwareLoadBalancerBase::HashingLoadBalancer - HostConstSharedPtr chooseHost(uint64_t hash) const override; + HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const override; std::vector ring_; @@ -75,7 +75,8 @@ class RingHashLoadBalancer : public ThreadAwareLoadBalancerBase, createLoadBalancer(const NormalizedHostWeightVector& normalized_host_weights, double min_normalized_weight, double /* max_normalized_weight */) override { return std::make_shared(normalized_host_weights, min_normalized_weight, min_ring_size_, - max_ring_size_, hash_function_, stats_); + max_ring_size_, hash_function_, use_hostname_for_hashing_, + stats_); } static RingHashLoadBalancerStats generateStats(Stats::Scope& scope); @@ -88,6 +89,7 @@ class RingHashLoadBalancer : public ThreadAwareLoadBalancerBase, const uint64_t min_ring_size_; const uint64_t max_ring_size_; const HashFunction hash_function_; + const bool use_hostname_for_hashing_; }; } // namespace Upstream diff --git a/source/common/upstream/static_cluster.cc b/source/common/upstream/static_cluster.cc index d34dbeaa2735..2657b809465f 100644 --- a/source/common/upstream/static_cluster.cc +++ b/source/common/upstream/static_cluster.cc @@ -28,8 +28,8 @@ StaticClusterImpl::StaticClusterImpl( 
priority_state_manager_->initializePriorityFor(locality_lb_endpoint); for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { priority_state_manager_->registerHostForPriority( - "", resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, - lb_endpoint); + lb_endpoint.endpoint().hostname(), resolveProtoAddress(lb_endpoint.endpoint().address()), + locality_lb_endpoint, lb_endpoint); } } } diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index abe6db0be597..1cf9de7230a2 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -169,8 +169,8 @@ void SubsetLoadBalancer::initSelectorFallbackSubset( HostPredicate predicate = std::bind(&SubsetLoadBalancer::hostMatches, this, default_subset_metadata_, std::placeholders::_1); selector_fallback_subset_default_ = std::make_shared(); - selector_fallback_subset_default_->priority_subset_.reset( - new PrioritySubsetImpl(*this, predicate, locality_weight_aware_, scale_locality_weight_)); + selector_fallback_subset_default_->priority_subset_ = std::make_shared( + *this, predicate, locality_weight_aware_, scale_locality_weight_); } } diff --git a/source/common/upstream/thread_aware_lb_impl.cc b/source/common/upstream/thread_aware_lb_impl.cc index 10855dd46c00..5ede9a31b64f 100644 --- a/source/common/upstream/thread_aware_lb_impl.cc +++ b/source/common/upstream/thread_aware_lb_impl.cc @@ -142,7 +142,20 @@ ThreadAwareLoadBalancerBase::LoadBalancerImpl::chooseHost(LoadBalancerContext* c if (per_priority_state->global_panic_) { stats_.lb_healthy_panic_.inc(); } - return per_priority_state->current_lb_->chooseHost(h); + + HostConstSharedPtr host; + const uint32_t max_attempts = context ? context->hostSelectionRetryCount() + 1 : 1; + for (uint32_t i = 0; i < max_attempts; ++i) { + host = per_priority_state->current_lb_->chooseHost(h, i); + + // If host selection failed or the host is accepted by the filter, return. 
+ // Otherwise, try again. + if (!host || !context || !context->shouldSelectAnotherHost(*host)) { + return host; + } + } + + return host; } LoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() { diff --git a/source/common/upstream/thread_aware_lb_impl.h b/source/common/upstream/thread_aware_lb_impl.h index 5095d2156850..f2b07d6d3708 100644 --- a/source/common/upstream/thread_aware_lb_impl.h +++ b/source/common/upstream/thread_aware_lb_impl.h @@ -24,7 +24,7 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL class HashingLoadBalancer { public: virtual ~HashingLoadBalancer() = default; - virtual HostConstSharedPtr chooseHost(uint64_t hash) const PURE; + virtual HostConstSharedPtr chooseHost(uint64_t hash, uint32_t attempt) const PURE; }; using HashingLoadBalancerSharedPtr = std::shared_ptr; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index c927ce9f04f1..49e56528834f 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -244,7 +244,8 @@ HostDescriptionImpl::HostDescriptionImpl( const envoy::config::core::v3::Locality& locality, const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config, uint32_t priority) - : cluster_(cluster), hostname_(hostname), address_(dest_address), + : cluster_(cluster), hostname_(hostname), + health_checks_hostname_(health_check_config.hostname()), address_(dest_address), canary_(Config::Metadata::metadataValue(metadata.get(), Config::MetadataFilters::get().ENVOY_LB, Config::MetadataEnvoyLbKeys::get().CANARY) diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 588e2cf55344..bdb40c9d6841 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -120,6 +120,7 @@ class HostDescriptionImpl : virtual public HostDescription, } } HostStats& stats() const override { return stats_; } + 
const std::string& hostnameForHealthChecks() const override { return health_checks_hostname_; } const std::string& hostname() const override { return hostname_; } Network::Address::InstanceConstSharedPtr address() const override { return address_; } Network::Address::InstanceConstSharedPtr healthCheckAddress() const override { @@ -140,6 +141,7 @@ class HostDescriptionImpl : virtual public HostDescription, protected: ClusterInfoConstSharedPtr cluster_; const std::string hostname_; + const std::string health_checks_hostname_; Network::Address::InstanceConstSharedPtr address_; Network::Address::InstanceConstSharedPtr health_check_address_; std::atomic canary_; diff --git a/source/extensions/access_loggers/wasm/BUILD b/source/extensions/access_loggers/wasm/BUILD index 0209abd54562..a620c68161e0 100644 --- a/source/extensions/access_loggers/wasm/BUILD +++ b/source/extensions/access_loggers/wasm/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -19,10 +20,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", deps = [ ":wasm_access_log_lib", "//include/envoy/registry", diff --git a/source/extensions/common/aws/utility.cc b/source/extensions/common/aws/utility.cc index a3d141577f78..fb9c00918f06 100644 --- a/source/extensions/common/aws/utility.cc +++ b/source/extensions/common/aws/utility.cc @@ -28,7 +28,7 @@ Utility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) { // Skip headers that are likely to mutate, when crossing proxies const auto key = entry.key().getStringView(); if (key == Http::Headers::get().ForwardedFor.get() || - key == Http::Headers::get().ForwardedProto.get()) { + key == Http::Headers::get().ForwardedProto.get() || key == "x-amzn-trace-id") { return Http::HeaderMap::Iterate::Continue; } diff --git 
a/source/extensions/common/crypto/utility_impl.cc b/source/extensions/common/crypto/utility_impl.cc index 2fa8c6e8441c..b8c1b151ef54 100644 --- a/source/extensions/common/crypto/utility_impl.cc +++ b/source/extensions/common/crypto/utility_impl.cc @@ -17,10 +17,7 @@ std::vector UtilityImpl::getSha256Digest(const Buffer::Instance& buffer bssl::ScopedEVP_MD_CTX ctx; auto rc = EVP_DigestInit(ctx.get(), EVP_sha256()); RELEASE_ASSERT(rc == 1, "Failed to init digest context"); - const auto num_slices = buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer.getRawSlices(slices.begin(), num_slices); - for (const auto& slice : slices) { + for (const auto& slice : buffer.getRawSlices()) { rc = EVP_DigestUpdate(ctx.get(), slice.mem_, slice.len_); RELEASE_ASSERT(rc == 1, "Failed to update digest"); } diff --git a/source/extensions/common/proxy_protocol/BUILD b/source/extensions/common/proxy_protocol/BUILD new file mode 100644 index 000000000000..755af8cae0d0 --- /dev/null +++ b/source/extensions/common/proxy_protocol/BUILD @@ -0,0 +1,20 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "proxy_protocol_header_lib", + srcs = ["proxy_protocol_header.cc"], + hdrs = ["proxy_protocol_header.h"], + deps = [ + "//include/envoy/buffer:buffer_interface", + "//include/envoy/network:address_interface", + "//source/common/network:address_lib", + ], +) diff --git a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc new file mode 100644 index 000000000000..c6f9d0a9f060 --- /dev/null +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc @@ -0,0 +1,107 @@ +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/network/address.h" + +#include "common/network/address_impl.h" + 
+namespace Envoy { +namespace Extensions { +namespace Common { +namespace ProxyProtocol { + +void generateV1Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, + uint32_t dst_port, Network::Address::IpVersion ip_version, + Buffer::Instance& out) { + std::ostringstream stream; + stream << PROXY_PROTO_V1_SIGNATURE; + + switch (ip_version) { + case Network::Address::IpVersion::v4: + stream << PROXY_PROTO_V1_AF_INET << " "; + break; + case Network::Address::IpVersion::v6: + stream << PROXY_PROTO_V1_AF_INET6 << " "; + break; + } + + stream << src_addr << " "; + stream << dst_addr << " "; + stream << src_port << " "; + stream << dst_port << "\r\n"; + + out.add(stream.str()); +} + +void generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, + uint32_t dst_port, Network::Address::IpVersion ip_version, + Buffer::Instance& out) { + out.add(PROXY_PROTO_V2_SIGNATURE, PROXY_PROTO_V2_SIGNATURE_LEN); + + const uint8_t version_and_command = PROXY_PROTO_V2_VERSION << 4 | PROXY_PROTO_V2_ONBEHALF_OF; + out.add(&version_and_command, 1); + + uint8_t address_family_and_protocol; + switch (ip_version) { + case Network::Address::IpVersion::v4: + address_family_and_protocol = PROXY_PROTO_V2_AF_INET << 4; + break; + case Network::Address::IpVersion::v6: + address_family_and_protocol = PROXY_PROTO_V2_AF_INET6 << 4; + break; + } + address_family_and_protocol |= PROXY_PROTO_V2_TRANSPORT_STREAM; + out.add(&address_family_and_protocol, 1); + + uint8_t addr_length[2]{0, 0}; + switch (ip_version) { + case Network::Address::IpVersion::v4: { + addr_length[1] = PROXY_PROTO_V2_ADDR_LEN_INET; + out.add(addr_length, 2); + + uint8_t addrs[8]; + const auto net_src_addr = + Network::Address::Ipv4Instance(src_addr, src_port).ip()->ipv4()->address(); + const auto net_dst_addr = + Network::Address::Ipv4Instance(dst_addr, dst_port).ip()->ipv4()->address(); + memcpy(addrs, &net_src_addr, 4); + memcpy(&addrs[4], &net_dst_addr, 4); + 
out.add(addrs, 8); + break; + } + case Network::Address::IpVersion::v6: { + addr_length[1] = PROXY_PROTO_V2_ADDR_LEN_INET6; + out.add(addr_length, 2); + + uint8_t addrs[32]; + const auto net_src_addr = + Network::Address::Ipv6Instance(src_addr, src_port).ip()->ipv6()->address(); + const auto net_dst_addr = + Network::Address::Ipv6Instance(dst_addr, dst_port).ip()->ipv6()->address(); + memcpy(addrs, &net_src_addr, 16); + memcpy(&addrs[16], &net_dst_addr, 16); + out.add(addrs, 32); + break; + } + } + + uint8_t ports[4]; + const auto net_src_port = htons(static_cast(src_port)); + const auto net_dst_port = htons(static_cast(dst_port)); + memcpy(ports, &net_src_port, 2); + memcpy(&ports[2], &net_dst_port, 2); + out.add(ports, 4); +} + +void generateV2LocalHeader(Buffer::Instance& out) { + out.add(PROXY_PROTO_V2_SIGNATURE, PROXY_PROTO_V2_SIGNATURE_LEN); + const uint8_t addr_fam_protocol_and_length[4]{PROXY_PROTO_V2_VERSION << 4, 0, 0, 0}; + out.add(addr_fam_protocol_and_length, 4); +} + +} // namespace ProxyProtocol +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/common/proxy_protocol/proxy_protocol_header.h b/source/extensions/common/proxy_protocol/proxy_protocol_header.h new file mode 100644 index 000000000000..81d9dc1f8951 --- /dev/null +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.h @@ -0,0 +1,55 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/network/address.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace ProxyProtocol { + +// See https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt for definitions + +constexpr char PROXY_PROTO_V1_SIGNATURE[] = "PROXY "; +constexpr auto PROXY_PROTO_V1_AF_INET = "TCP4"; +constexpr auto PROXY_PROTO_V1_AF_INET6 = "TCP6"; +constexpr auto PROXY_PROTO_V1_UNKNOWN = "UNKNOWN"; + +constexpr char PROXY_PROTO_V2_SIGNATURE[] = 
"\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a"; + +constexpr uint32_t PROXY_PROTO_V1_SIGNATURE_LEN = 6; +constexpr uint32_t PROXY_PROTO_V2_SIGNATURE_LEN = 12; +constexpr uint32_t PROXY_PROTO_V2_HEADER_LEN = 16; + +constexpr uint32_t PROXY_PROTO_V2_VERSION = 0x2; +constexpr uint32_t PROXY_PROTO_V2_ONBEHALF_OF = 0x1; +constexpr uint32_t PROXY_PROTO_V2_LOCAL = 0x0; + +constexpr uint32_t PROXY_PROTO_V2_AF_INET = 0x1; +constexpr uint32_t PROXY_PROTO_V2_AF_INET6 = 0x2; +constexpr uint32_t PROXY_PROTO_V2_AF_UNIX = 0x3; + +constexpr uint8_t PROXY_PROTO_V2_TRANSPORT_STREAM = 0x1; +constexpr uint8_t PROXY_PROTO_V2_TRANSPORT_DGRAM = 0x2; + +constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNSPEC = 0; +constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET = 12; +constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET6 = 36; +constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNIX = 216; + +// Generates the v1 PROXY protocol header and adds it to the specified buffer +void generateV1Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, + uint32_t dst_port, Network::Address::IpVersion ip_version, + Buffer::Instance& out); +// Generates the v2 PROXY protocol header and adds it to the specified buffer +// TCP is assumed as the transport protocol +void generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, + uint32_t dst_port, Network::Address::IpVersion ip_version, + Buffer::Instance& out); +// Generates the v2 PROXY protocol local command header and adds it to the specified buffer +void generateV2LocalHeader(Buffer::Instance& out); + +} // namespace ProxyProtocol +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/common/tap/tap_config_base.cc b/source/extensions/common/tap/tap_config_base.cc index 5624d03b0dd2..d1542f26b976 100644 --- a/source/extensions/common/tap/tap_config_base.cc +++ b/source/extensions/common/tap/tap_config_base.cc @@ -28,9 +28,7 @@ 
bool Utility::addBufferToProtoBytes(envoy::data::tap::v3::Body& output_body, ASSERT(buffer_start_offset + buffer_length_to_copy <= data.length()); const uint32_t final_bytes_to_copy = std::min(max_buffered_bytes, buffer_length_to_copy); - const uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); + Buffer::RawSliceVector slices = data.getRawSlices(); trimSlices(slices, buffer_start_offset, final_bytes_to_copy); for (const Buffer::RawSlice& slice : slices) { output_body.mutable_as_bytes()->append(static_cast(slice.mem_), slice.len_); diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 5f16e04b1681..a58d05d5ed0b 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -36,7 +36,7 @@ envoy_cc_library( "//source/extensions/filters/common/expr:evaluator_lib", "//source/extensions/filters/http:well_known_names", "@com_google_cel_cpp//eval/public:activation", - "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@envoy_api//envoy/config/wasm/v3:pkg_cc_proto", "@proxy_wasm_cpp_host//:include", "@proxy_wasm_cpp_sdk//:common_lib", ], @@ -84,7 +84,7 @@ envoy_cc_library( "@com_google_cel_cpp//eval/public:cel_value", "@com_google_cel_cpp//eval/public:value_export_util", "@com_google_cel_cpp//parser", - "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@envoy_api//envoy/config/wasm/v3:pkg_cc_proto", "@proxy_wasm_cpp_host//:lib", ], ) diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index eb795acde450..28717fe08192 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -5,7 +5,7 @@ #include #include "envoy/common/exception.h" -#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/config/wasm/v3/wasm.pb.validate.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include 
"envoy/local_info/local_info.h" @@ -202,7 +202,7 @@ Http::RequestTrailerMapPtr buildRequestTrailerMapFromPairs(const Pairs& pairs) { // Note: because of the lack of a string_view interface for addCopy and // the lack of an interface to add an entry with an empty value and return // the entry, there is no efficient way to prevent either a double copy - // of the valueor a double lookup of the entry. + // of the value or a double lookup of the entry. map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); } return map; @@ -214,7 +214,7 @@ Http::RequestHeaderMapPtr buildRequestHeaderMapFromPairs(const Pairs& pairs) { // Note: because of the lack of a string_view interface for addCopy and // the lack of an interface to add an entry with an empty value and return // the entry, there is no efficient way to prevent either a double copy - // of the valueor a double lookup of the entry. + // of the value or a double lookup of the entry. map->addCopy(Http::LowerCaseString(std::string(p.first)), std::string(p.second)); } return map; @@ -360,13 +360,13 @@ WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* re return WasmResult::Ok; } case CelValue::Type::kDuration: { - // Warning: loss of precision to nano-seconds + // Warning: loss of precision to nanoseconds int64_t out = absl::ToInt64Nanoseconds(value.DurationOrDie()); result->assign(reinterpret_cast(&out), sizeof(int64_t)); return WasmResult::Ok; } case CelValue::Type::kTimestamp: { - // Warning: loss of precision to nano-seconds + // Warning: loss of precision to nanoseconds int64_t out = absl::ToUnixNanos(value.TimestampOrDie()); result->assign(reinterpret_cast(&out), sizeof(int64_t)); return WasmResult::Ok; diff --git a/source/extensions/common/wasm/context.h b/source/extensions/common/wasm/context.h index 7ddbf8ec5a72..772f7e459c1d 100644 --- a/source/extensions/common/wasm/context.h +++ b/source/extensions/common/wasm/context.h @@ -299,10 +299,12 @@ class Context : 
public proxy_wasm::ContextBase, struct AsyncClientHandler : public Http::AsyncClient::Callbacks { // Http::AsyncClient::Callbacks - void onSuccess(Envoy::Http::ResponseMessagePtr&& response) override { + void onSuccess(const Http::AsyncClient::Request&, + Envoy::Http::ResponseMessagePtr&& response) override { context_->onHttpCallSuccess(token_, std::move(response)); } - void onFailure(Http::AsyncClient::FailureReason reason) override { + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override { context_->onHttpCallFailure(token_, reason); } diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index 17437f8f3cfc..7131ec00ff69 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -7,7 +7,7 @@ #include #include "envoy/common/exception.h" -#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/config/wasm/v3/wasm.pb.validate.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" diff --git a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h index 779cc96b9ae1..da4f39d91624 100644 --- a/source/extensions/common/wasm/wasm.h +++ b/source/extensions/common/wasm/wasm.h @@ -5,8 +5,10 @@ #include #include "envoy/common/exception.h" -#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/config/wasm/v3/wasm.pb.validate.h" +#include "envoy/http/filter.h" #include "envoy/server/lifecycle_notifier.h" +#include "envoy/server/wasm.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats.h" #include "envoy/thread_local/thread_local.h" @@ -37,10 +39,9 @@ struct WasmStats { ALL_WASM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; -class Context; -class WasmHandle; +using VmConfig = envoy::config::wasm::v3::VmConfig; -using VmConfig = envoy::extensions::wasm::v3::VmConfig; +class WasmHandle; // Wasm execution instance. 
Manages the Envoy side of the Wasm interface. class Wasm : public WasmBase, Logger::Loggable { diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 6c4bb5bed53b..ac557fa312fe 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -112,6 +112,7 @@ EXTENSIONS = { # UDP filters # + "envoy.filters.udp_listener.dns_filter": "//source/extensions/filters/udp/dns_filter:config", "envoy.filters.udp_listener.udp_proxy": "//source/extensions/filters/udp/udp_proxy:config", # diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 19373693901a..6df67169d09e 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -31,19 +31,19 @@ absl::optional extractSslInfo(const Ssl::ConnectionInfo& ssl_info, } else if (value == SubjectPeerCertificate) { return CelValue::CreateString(&ssl_info.subjectPeerCertificate()); } else if (value == URISanLocalCertificate) { - if (ssl_info.uriSanLocalCertificate().size() > 0) { + if (!ssl_info.uriSanLocalCertificate().empty()) { return CelValue::CreateString(&ssl_info.uriSanLocalCertificate()[0]); } } else if (value == URISanPeerCertificate) { - if (ssl_info.uriSanPeerCertificate().size() > 0) { + if (!ssl_info.uriSanPeerCertificate().empty()) { return CelValue::CreateString(&ssl_info.uriSanPeerCertificate()[0]); } } else if (value == DNSSanLocalCertificate) { - if (ssl_info.dnsSansLocalCertificate().size() > 0) { + if (!ssl_info.dnsSansLocalCertificate().empty()) { return CelValue::CreateString(&ssl_info.dnsSansLocalCertificate()[0]); } } else if (value == DNSSanPeerCertificate) { - if (ssl_info.dnsSansPeerCertificate().size() > 0) { + if (!ssl_info.dnsSansPeerCertificate().empty()) { return CelValue::CreateString(&ssl_info.dnsSansPeerCertificate()[0]); } } diff --git 
a/source/extensions/filters/common/expr/evaluator.cc b/source/extensions/filters/common/expr/evaluator.cc index 75c3656426e8..0bd1d3554f19 100644 --- a/source/extensions/filters/common/expr/evaluator.cc +++ b/source/extensions/filters/common/expr/evaluator.cc @@ -71,7 +71,7 @@ absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena const Http::ResponseHeaderMap* response_headers, const Http::ResponseTrailerMap* response_trailers) { auto activation = createActivation(info, request_headers, response_headers, response_trailers); - auto eval_status = expr.Evaluate(*activation.get(), arena); + auto eval_status = expr.Evaluate(*activation, arena); if (!eval_status.ok()) { return {}; } diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 0061facbddbc..58ed8316353e 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["ext_authz.h"], deps = [ "//include/envoy/http:codes_interface", + "//include/envoy/stream_info:stream_info_interface", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index 0a763efdee3f..725f534090f7 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -8,6 +8,7 @@ #include "envoy/common/pure.h" #include "envoy/http/codes.h" #include "envoy/service/auth/v3/external_auth.pb.h" +#include "envoy/stream_info/stream_info.h" #include "envoy/tracing/http_tracer.h" #include "common/singleton/const_singleton.h" @@ -90,11 +91,11 @@ class Client { * NOTE: The callback may happen within the calling stack. * @param request is the proto message with the attributes of the specific payload. 
* @param parent_span source for generating an egress child span as part of the trace. - * + * @param stream_info supplies the client's stream info. */ virtual void check(RequestCallbacks& callback, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span) PURE; + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) PURE; }; using ClientPtr = std::unique_ptr; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index 62cfa35252f0..d96756eaf43b 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -37,7 +37,7 @@ void GrpcClientImpl::cancel() { void GrpcClientImpl::check(RequestCallbacks& callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span) { + Tracing::Span& parent_span, const StreamInfo::StreamInfo&) { ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index cdd7deb02609..a678505ba322 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -48,7 +48,7 @@ class GrpcClientImpl : public Client, public ExtAuthzAsyncCallbacks { // ExtAuthz::Client void cancel() override; void check(RequestCallbacks& callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span) override; + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) override; // Grpc::AsyncRequestCallbacks void onCreateInitialMetadata(Http::RequestHeaderMap&) override {} diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 
fa207cc411ea..3df000d46e12 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -128,11 +128,11 @@ ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3 upstream_header_matchers_(toUpstreamMatchers( config.http_service().authorization_response().allowed_upstream_headers(), enable_case_sensitive_string_matcher_)), - authorization_headers_to_add_( - toHeadersAdd(config.http_service().authorization_request().headers_to_add())), cluster_name_(config.http_service().server_uri().cluster()), timeout_(timeout), path_prefix_(path_prefix), - tracing_name_(fmt::format("async {} egress", config.http_service().server_uri().cluster())) {} + tracing_name_(fmt::format("async {} egress", config.http_service().server_uri().cluster())), + request_headers_parser_(Router::HeaderParser::configure( + config.http_service().authorization_request().headers_to_add(), false)) {} MatcherSharedPtr ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list, @@ -190,16 +190,6 @@ ClientConfig::toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatch createStringMatchers(list, disable_lowercase_string_matcher)); } -Http::LowerCaseStrPairVector ClientConfig::toHeadersAdd( - const Protobuf::RepeatedPtrField& headers) { - Http::LowerCaseStrPairVector header_vec; - header_vec.reserve(headers.size()); - for (const auto& header : headers) { - header_vec.emplace_back(Http::LowerCaseString(header.key()), header.value()); - } - return header_vec; -} - RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, TimeSource& time_source) : cm_(cm), config_(config), time_source_(time_source) {} @@ -222,7 +212,8 @@ void RawHttpClientImpl::cancel() { // Client void RawHttpClientImpl::check(RequestCallbacks& callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span) { + 
Tracing::Span& parent_span, + const StreamInfo::StreamInfo& stream_info) { ASSERT(callbacks_ == nullptr); ASSERT(span_ == nullptr); callbacks_ = &callbacks; @@ -255,9 +246,7 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, } } - for (const auto& header_to_add : config_->headersToAdd()) { - headers->setReference(header_to_add.first, header_to_add.second); - } + config_->requestHeaderParser().evaluateHeaders(*headers, stream_info); Http::RequestMessagePtr message = std::make_unique(std::move(headers)); @@ -286,14 +275,16 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, } } -void RawHttpClientImpl::onSuccess(Http::ResponseMessagePtr&& message) { +void RawHttpClientImpl::onSuccess(const Http::AsyncClient::Request&, + Http::ResponseMessagePtr&& message) { callbacks_->onComplete(toResponse(std::move(message))); span_->finishSpan(); callbacks_ = nullptr; span_ = nullptr; } -void RawHttpClientImpl::onFailure(Http::AsyncClient::FailureReason reason) { +void RawHttpClientImpl::onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) { ASSERT(reason == Http::AsyncClient::FailureReason::Reset); callbacks_->onComplete(std::make_unique(errorResponse())); span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 23a1e69aab76..7a616a852d97 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -9,6 +9,7 @@ #include "common/common/logger.h" #include "common/common/matchers.h" +#include "common/router/header_parser.h" #include "common/runtime/runtime_protos.h" #include "extensions/filters/common/ext_authz/ext_authz.h" @@ -108,6 +109,11 @@ class ClientConfig { */ const std::string& tracingName() { return tracing_name_; } + /** + * Returns the configured request header 
parser. + */ + const Router::HeaderParser& requestHeaderParser() const { return *request_headers_parser_; } + private: static MatcherSharedPtr toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher, @@ -118,8 +124,6 @@ class ClientConfig { static MatcherSharedPtr toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher, bool enable_case_sensitive_string_matcher); - static Http::LowerCaseStrPairVector - toHeadersAdd(const Protobuf::RepeatedPtrField&); const bool enable_case_sensitive_string_matcher_; const MatcherSharedPtr request_header_matchers_; @@ -130,6 +134,7 @@ class ClientConfig { const std::chrono::milliseconds timeout_; const std::string path_prefix_; const std::string tracing_name_; + Router::HeaderParserPtr request_headers_parser_; }; using ClientConfigSharedPtr = std::shared_ptr; @@ -152,11 +157,12 @@ class RawHttpClientImpl : public Client, // ExtAuthz::Client void cancel() override; void check(RequestCallbacks& callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span&) override; + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) override; // Http::AsyncClient::Callbacks - void onSuccess(Http::ResponseMessagePtr&& message) override; - void onFailure(Http::AsyncClient::FailureReason reason) override; + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) override; + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override; private: ResponsePtr toResponse(Http::ResponseMessagePtr message); diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index 2bb8e56e00f1..123f394d6523 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ b/source/extensions/filters/common/rbac/matchers.cc @@ -19,7 +19,8 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Permission& case 
envoy::config::rbac::v3::Permission::RuleCase::kHeader: return std::make_shared(permission.header()); case envoy::config::rbac::v3::Permission::RuleCase::kDestinationIp: - return std::make_shared(permission.destination_ip(), true); + return std::make_shared(permission.destination_ip(), + IPMatcher::Type::DownstreamLocal); case envoy::config::rbac::v3::Permission::RuleCase::kDestinationPort: return std::make_shared(permission.destination_port()); case envoy::config::rbac::v3::Permission::RuleCase::kAny: @@ -46,7 +47,14 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Principal& case envoy::config::rbac::v3::Principal::IdentifierCase::kAuthenticated: return std::make_shared(principal.authenticated()); case envoy::config::rbac::v3::Principal::IdentifierCase::kSourceIp: - return std::make_shared(principal.source_ip(), false); + return std::make_shared(principal.source_ip(), + IPMatcher::Type::ConnectionRemote); + case envoy::config::rbac::v3::Principal::IdentifierCase::kDirectRemoteIp: + return std::make_shared(principal.direct_remote_ip(), + IPMatcher::Type::DownstreamDirectRemote); + case envoy::config::rbac::v3::Principal::IdentifierCase::kRemoteIp: + return std::make_shared(principal.remote_ip(), + IPMatcher::Type::DownstreamRemote); case envoy::config::rbac::v3::Principal::IdentifierCase::kHeader: return std::make_shared(principal.header()); case envoy::config::rbac::v3::Principal::IdentifierCase::kAny: @@ -123,17 +131,30 @@ bool HeaderMatcher::matches(const Network::Connection&, } bool IPMatcher::matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap&, - const StreamInfo::StreamInfo&) const { - const Envoy::Network::Address::InstanceConstSharedPtr& ip = - destination_ ? 
connection.localAddress() : connection.remoteAddress(); - + const StreamInfo::StreamInfo& info) const { + Envoy::Network::Address::InstanceConstSharedPtr ip; + switch (type_) { + case ConnectionRemote: + ip = connection.remoteAddress(); + break; + case DownstreamLocal: + ip = info.downstreamLocalAddress(); + break; + case DownstreamDirectRemote: + ip = info.downstreamDirectRemoteAddress(); + break; + case DownstreamRemote: + ip = info.downstreamRemoteAddress(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } return range_.isInRange(*ip.get()); } -bool PortMatcher::matches(const Network::Connection& connection, - const Envoy::Http::RequestHeaderMap&, - const StreamInfo::StreamInfo&) const { - const Envoy::Network::Address::Ip* ip = connection.localAddress().get()->ip(); +bool PortMatcher::matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&, + const StreamInfo::StreamInfo& info) const { + const Envoy::Network::Address::Ip* ip = info.downstreamLocalAddress().get()->ip(); return ip && ip->port() == port_; } diff --git a/source/extensions/filters/common/rbac/matchers.h b/source/extensions/filters/common/rbac/matchers.h index 3e880dda40b1..fcc10f41fdb1 100644 --- a/source/extensions/filters/common/rbac/matchers.h +++ b/source/extensions/filters/common/rbac/matchers.h @@ -132,20 +132,22 @@ class HeaderMatcher : public Matcher { }; /** - * Perform a match against an IP CIDR range. This rule can be applied to either the source - * (remote) or the destination (local) IP. + * Perform a match against an IP CIDR range. This rule can be applied to connection remote, + * downstream local address, downstream direct remote address or downstream remote address. 
*/ class IPMatcher : public Matcher { public: - IPMatcher(const envoy::config::core::v3::CidrRange& range, bool destination) - : range_(Network::Address::CidrRange::create(range)), destination_(destination) {} + enum Type { ConnectionRemote = 0, DownstreamLocal, DownstreamDirectRemote, DownstreamRemote }; + + IPMatcher(const envoy::config::core::v3::CidrRange& range, Type type) + : range_(Network::Address::CidrRange::create(range)), type_(type) {} bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo&) const override; + const StreamInfo::StreamInfo& info) const override; private: const Network::Address::CidrRange range_; - const bool destination_; + const Type type_; }; /** @@ -155,8 +157,8 @@ class PortMatcher : public Matcher { public: PortMatcher(const uint32_t port) : port_(port) {} - bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo&) const override; + bool matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&, + const StreamInfo::StreamInfo& info) const override; private: const uint32_t port_; diff --git a/source/extensions/filters/http/adaptive_concurrency/BUILD b/source/extensions/filters/http/adaptive_concurrency/BUILD index 4b757362b85a..1cff74436f07 100644 --- a/source/extensions/filters/http/adaptive_concurrency/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/BUILD @@ -20,7 +20,7 @@ envoy_cc_library( deps = [ "//include/envoy/http:filter_interface", "//source/extensions/filters/http:well_known_names", - "//source/extensions/filters/http/adaptive_concurrency/concurrency_controller:concurrency_controller_lib", + "//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", "@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto", ], @@ -36,7 +36,7 @@ 
envoy_cc_extension( "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/adaptive_concurrency:adaptive_concurrency_filter_lib", - "//source/extensions/filters/http/adaptive_concurrency/concurrency_controller:concurrency_controller_lib", + "//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib", "//source/extensions/filters/http/common:factory_base_lib", "@envoy_api//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc index 012f81e54d3a..69e706cbf2b6 100644 --- a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc +++ b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc @@ -10,7 +10,7 @@ #include "common/common/assert.h" #include "common/protobuf/utility.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/controller.h" #include "extensions/filters/http/well_known_names.h" namespace Envoy { @@ -37,7 +37,7 @@ Http::FilterHeadersStatus AdaptiveConcurrencyFilter::decodeHeaders(Http::Request return Http::FilterHeadersStatus::Continue; } - if (controller_->forwardingDecision() == ConcurrencyController::RequestForwardingAction::Block) { + if (controller_->forwardingDecision() == Controller::RequestForwardingAction::Block) { decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, "reached concurrency limit"); return Http::FilterHeadersStatus::StopIteration; diff --git a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h index dd375bf2d523..7424e1d3bdba 
100644 --- a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h +++ b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h @@ -14,7 +14,7 @@ #include "common/common/cleanup.h" #include "common/runtime/runtime_protos.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/controller.h" #include "extensions/filters/http/common/pass_through_filter.h" namespace Envoy { @@ -44,8 +44,7 @@ class AdaptiveConcurrencyFilterConfig { using AdaptiveConcurrencyFilterConfigSharedPtr = std::shared_ptr; -using ConcurrencyControllerSharedPtr = - std::shared_ptr; +using ConcurrencyControllerSharedPtr = std::shared_ptr; /** * A filter that samples request latencies and dynamically adjusts the request diff --git a/source/extensions/filters/http/adaptive_concurrency/config.cc b/source/extensions/filters/http/adaptive_concurrency/config.cc index e284d6095650..fc6b9d5e0f99 100644 --- a/source/extensions/filters/http/adaptive_concurrency/config.cc +++ b/source/extensions/filters/http/adaptive_concurrency/config.cc @@ -5,7 +5,7 @@ #include "envoy/registry/registry.h" #include "extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h" namespace Envoy { namespace Extensions { @@ -18,13 +18,13 @@ Http::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromP auto acc_stats_prefix = stats_prefix + "adaptive_concurrency."; - std::shared_ptr controller; + std::shared_ptr controller; using Proto = envoy::extensions::filters::http::adaptive_concurrency::v3::AdaptiveConcurrency; ASSERT(config.concurrency_controller_config_case() == Proto::ConcurrencyControllerConfigCase::kGradientControllerConfig); - 
auto gradient_controller_config = ConcurrencyController::GradientControllerConfig( - config.gradient_controller_config(), context.runtime()); - controller = std::make_shared( + auto gradient_controller_config = + Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime()); + controller = std::make_shared( std::move(gradient_controller_config), context.dispatcher(), context.runtime(), acc_stats_prefix + "gradient_controller.", context.scope(), context.random()); diff --git a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/BUILD b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD similarity index 91% rename from source/extensions/filters/http/adaptive_concurrency/concurrency_controller/BUILD rename to source/extensions/filters/http/adaptive_concurrency/controller/BUILD index c6dfc0a45780..b5e828f9a3b3 100644 --- a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -13,10 +13,10 @@ load( envoy_package() envoy_cc_library( - name = "concurrency_controller_lib", + name = "controller_lib", srcs = ["gradient_controller.cc"], hdrs = [ - "concurrency_controller.h", + "controller.h", "gradient_controller.h", ], external_deps = [ diff --git a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/controller.h similarity index 95% rename from source/extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h rename to source/extensions/filters/http/adaptive_concurrency/controller/controller.h index 20342c0bd6cf..ecb78307a9d2 100644 --- a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/controller.h @@ -8,7 +8,7 @@ namespace Envoy { 
namespace Extensions { namespace HttpFilters { namespace AdaptiveConcurrency { -namespace ConcurrencyController { +namespace Controller { /** * The controller's decision on whether a request will be forwarded. @@ -57,7 +57,7 @@ class ConcurrencyController { virtual uint32_t concurrencyLimit() const PURE; }; -} // namespace ConcurrencyController +} // namespace Controller } // namespace AdaptiveConcurrency } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.cc b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc similarity index 81% rename from source/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.cc rename to source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc index e975e9b59275..c94ddaef11c3 100644 --- a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.cc +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc @@ -1,4 +1,4 @@ -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h" #include #include @@ -12,7 +12,7 @@ #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/controller.h" #include "absl/synchronization/mutex.h" @@ -20,7 +20,7 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { namespace AdaptiveConcurrency { -namespace ConcurrencyController { +namespace Controller { GradientControllerConfig::GradientControllerConfig( const envoy::extensions::filters::http::adaptive_concurrency::v3::GradientControllerConfig& @@ -48,8 
+48,8 @@ GradientController::GradientController(GradientControllerConfig config, const std::string& stats_prefix, Stats::Scope& scope, Runtime::RandomGenerator& random) : config_(std::move(config)), dispatcher_(dispatcher), scope_(scope), - stats_(generateStats(scope_, stats_prefix)), random_(random), num_rq_outstanding_(0), - concurrency_limit_(config_.minConcurrency()), + stats_(generateStats(scope_, stats_prefix)), random_(random), deferred_limit_value_(0), + num_rq_outstanding_(0), concurrency_limit_(config_.minConcurrency()), latency_sample_hist_(hist_fast_alloc(), hist_free) { min_rtt_calc_timer_ = dispatcher_.createTimer([this]() -> void { enterMinRTTSamplingWindow(); }); @@ -81,6 +81,14 @@ GradientControllerStats GradientController::generateStats(Stats::Scope& scope, } void GradientController::enterMinRTTSamplingWindow() { + // There a potential race condition where setting the minimum concurrency multiple times in a row + // resets the minRTT sampling timer and triggers the calculation immediately. This could occur + // after the minRTT sampling window has already been entered, so we can simply return here knowing + // the desired action is already being performed. + if (inMinRTTSamplingWindow()) { + return; + } + absl::MutexLock ml(&sample_mutation_mtx_); stats_.min_rtt_calculation_active_.set(1); @@ -88,7 +96,7 @@ void GradientController::enterMinRTTSamplingWindow() { // Set the minRTT flag to indicate we're gathering samples to update the value. This will // prevent the sample window from resetting until enough requests are gathered to complete the // recalculation. 
- deferred_limit_value_.store(concurrencyLimit()); + deferred_limit_value_.store(GradientController::concurrencyLimit()); updateConcurrencyLimit(config_.minConcurrency()); // Throw away any latency samples from before the recalculation window as it may not represent @@ -209,7 +217,33 @@ void GradientController::cancelLatencySample() { --num_rq_outstanding_; } -} // namespace ConcurrencyController +void GradientController::updateConcurrencyLimit(const uint32_t new_limit) { + const auto old_limit = concurrency_limit_.load(); + concurrency_limit_.store(new_limit); + stats_.concurrency_limit_.set(concurrency_limit_.load()); + + if (!inMinRTTSamplingWindow() && old_limit == config_.minConcurrency() && + new_limit == config_.minConcurrency()) { + ++consecutive_min_concurrency_set_; + } else { + consecutive_min_concurrency_set_ = 0; + } + + // If the concurrency limit is being set to the minimum value for the 5th consecutive sample + // window while not in the middle of a minRTT measurement, this might be indicative of an + // inaccurate minRTT measurement. Since the limit is already where it needs to be for a minRTT + // measurement, we should measure it again. + // + // There is a possibility that the minRTT measurement begins before we are able to + // cancel/re-enable the timer below and triggers overlapping minRTT windows. To protect against + // this, there is an explicit check when entering the minRTT measurement that ensures there is + // only a single minRTT measurement active at a time. 
+ if (consecutive_min_concurrency_set_ >= 5) { + min_rtt_calc_timer_->enableTimer(std::chrono::milliseconds(0)); + } +} + +} // namespace Controller } // namespace AdaptiveConcurrency } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h similarity index 95% rename from source/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.h rename to source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h index be59dc816e5b..1da1c3d8b81a 100644 --- a/source/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h @@ -8,7 +8,7 @@ #include "envoy/runtime/runtime.h" #include "envoy/stats/stats_macros.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/controller.h" #include "absl/base/thread_annotations.h" #include "absl/strings/numbers.h" @@ -19,7 +19,7 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { namespace AdaptiveConcurrency { -namespace ConcurrencyController { +namespace Controller { /** * All stats for the gradient controller. 
@@ -228,10 +228,8 @@ class GradientController : public ConcurrencyController { void enterMinRTTSamplingWindow(); bool inMinRTTSamplingWindow() const { return deferred_limit_value_.load() > 0; } void resetSampleWindow() ABSL_EXCLUSIVE_LOCKS_REQUIRED(sample_mutation_mtx_); - void updateConcurrencyLimit(const uint32_t new_limit) { - concurrency_limit_.store(new_limit); - stats_.concurrency_limit_.set(concurrency_limit_.load()); - } + void updateConcurrencyLimit(const uint32_t new_limit) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(sample_mutation_mtx_); std::chrono::milliseconds applyJitter(std::chrono::milliseconds interval, double jitter_pct) const; @@ -271,12 +269,17 @@ class GradientController : public ConcurrencyController { std::unique_ptr latency_sample_hist_ ABSL_GUARDED_BY(sample_mutation_mtx_); + // Tracks the number of consecutive times that the concurrency limit is set to the minimum. This + // is used to determine whether the controller should trigger an additional minRTT measurement + // after remaining at the minimum limit for too long. 
+ uint32_t consecutive_min_concurrency_set_ ABSL_GUARDED_BY(sample_mutation_mtx_); + Event::TimerPtr min_rtt_calc_timer_; Event::TimerPtr sample_reset_timer_; }; using GradientControllerSharedPtr = std::shared_ptr; -} // namespace ConcurrencyController +} // namespace Controller } // namespace AdaptiveConcurrency } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/aws_lambda/BUILD b/source/extensions/filters/http/aws_lambda/BUILD index b7c67308ce2d..a3c73926c517 100644 --- a/source/extensions/filters/http/aws_lambda/BUILD +++ b/source/extensions/filters/http/aws_lambda/BUILD @@ -8,16 +8,24 @@ load( "envoy_cc_extension", "envoy_cc_library", "envoy_package", + "envoy_proto_library", ) envoy_package() +envoy_proto_library( + name = "request_response", + srcs = ["request_response.proto"], +) + envoy_cc_library( name = "aws_lambda_filter_lib", srcs = ["aws_lambda_filter.cc"], hdrs = ["aws_lambda_filter.h"], deps = [ + ":request_response_cc_proto", "//include/envoy/http:filter_interface", + "//source/common/common:base64_lib", "//source/extensions/common/aws:credentials_provider_impl_lib", "//source/extensions/common/aws:signer_impl_lib", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index d90e5a410567..3855cd360bce 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -3,16 +3,27 @@ #include #include +#include "envoy/http/codes.h" +#include "envoy/http/filter.h" +#include "envoy/http/header_map.h" #include "envoy/upstream/upstream.h" +#include "common/buffer/buffer_impl.h" +#include "common/common/base64.h" #include "common/common/fmt.h" #include "common/common/hex.h" #include "common/crypto/utility.h" #include "common/http/headers.h" +#include "common/http/utility.h" +#include 
"common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" +#include "common/singleton/const_singleton.h" + +#include "source/extensions/filters/http/aws_lambda/request_response.pb.validate.h" #include "extensions/filters/http/well_known_names.h" +#include "absl/strings/numbers.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -20,15 +31,28 @@ namespace Extensions { namespace HttpFilters { namespace AwsLambdaFilter { +class LambdaFilterNameValues { +public: + Http::LowerCaseString InvocationTypeHeader{std::string{"x-amz-invocation-type"}}; + Http::LowerCaseString FunctionErrorHeader{std::string{"x-amz-function-error"}}; +}; + +using LambdaFilterNames = ConstSingleton; + namespace { constexpr auto filter_metadata_key = "com.amazonaws.lambda"; constexpr auto egress_gateway_metadata_key = "egress_gateway"; -void setHeaders(Http::RequestHeaderMap& headers, absl::string_view function_name) { +void setLambdaHeaders(Http::RequestHeaderMap& headers, absl::string_view function_name, + InvocationMode mode) { headers.setMethod(Http::Headers::get().MethodValues.Post); headers.setPath(fmt::format("/2015-03-31/functions/{}/invocations", function_name)); - headers.setCopy(Http::LowerCaseString{"x-amz-invocation-type"}, "RequestResponse"); + if (mode == InvocationMode::Synchronous) { + headers.setReference(LambdaFilterNames::get().InvocationTypeHeader, "RequestResponse"); + } else { + headers.setReference(LambdaFilterNames::get().InvocationTypeHeader, "Event"); + } } /** @@ -54,13 +78,49 @@ bool isTargetClusterLambdaGateway(Upstream::ClusterInfo const& cluster_info) { return egress_gateway_it->second.bool_value(); } +bool isContentTypeTextual(const Http::RequestOrResponseHeaderMap& headers) { + // If transfer-encoding is anything other than 'identity' (i.e. chunked, compress, deflate or + // gzip) then we want to base64-encode the response body regardless of the content-type value. 
+ if (auto encoding_header = headers.TransferEncoding()) { + if (!absl::EqualsIgnoreCase(encoding_header->value().getStringView(), + Http::Headers::get().TransferEncodingValues.Identity)) { + return false; + } + } + + // If we don't know the content-type, then we can't make any assumptions. + if (!headers.ContentType()) { + return false; + } + + const Http::LowerCaseString content_type_value{ + std::string(headers.ContentType()->value().getStringView())}; + if (content_type_value.get() == Http::Headers::get().ContentTypeValues.Json) { + return true; + } + + if (content_type_value.get() == "application/javascript") { + return true; + } + + if (content_type_value.get() == "application/xml") { + return true; + } + + if (absl::StartsWith(content_type_value.get(), "text/")) { + return true; + } + + return false; +} + } // namespace Filter::Filter(const FilterSettings& settings, const std::shared_ptr& sigv4_signer) : settings_(settings), sigv4_signer_(sigv4_signer) {} -absl::optional Filter::calculateRouteArn() { +absl::optional Filter::getRouteSpecificSettings() const { if (!decoder_callbacks_->route() || !decoder_callbacks_->route()->routeEntry()) { return absl::nullopt; } @@ -71,58 +131,258 @@ absl::optional Filter::calculateRouteArn() { return absl::nullopt; } - return parseArn(settings->arn()); + return *settings; +} + +std::string Filter::resolveSettings() { + if (auto route_settings = getRouteSpecificSettings()) { + if (auto route_arn = parseArn(route_settings->arn())) { + arn_.swap(route_arn); + payload_passthrough_ = route_settings->payloadPassthrough(); + invocation_mode_ = route_settings->invocationMode(); + } else { + // TODO(marcomagdy): add stats for this error + ENVOY_LOG(debug, "Found route specific configuration but failed to parse Lambda ARN {}.", + route_settings->arn()); + return "Invalid AWS Lambda ARN"; + } + } else { + payload_passthrough_ = settings_.payloadPassthrough(); + invocation_mode_ = settings_.invocationMode(); + } + return {}; } 
Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { - if (!settings_.payloadPassthrough()) { + auto cluster_info_ptr = decoder_callbacks_->clusterInfo(); + if (!cluster_info_ptr || !isTargetClusterLambdaGateway(*cluster_info_ptr)) { skip_ = true; + ENVOY_LOG(trace, "Target cluster does not have the Lambda metadata. Moving on."); return Http::FilterHeadersStatus::Continue; } - auto route_arn = calculateRouteArn(); - if (route_arn.has_value()) { - auto cluster_info_ptr = decoder_callbacks_->clusterInfo(); - ASSERT(cluster_info_ptr); - if (!isTargetClusterLambdaGateway(*cluster_info_ptr)) { - skip_ = true; - return Http::FilterHeadersStatus::Continue; - } - arn_.swap(route_arn); - } else { + const auto err = resolveSettings(); + + if (!err.empty()) { + skip_ = true; + decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, err, nullptr /*modify_headers*/, + absl::nullopt /*grpc_status*/, "" /*details*/); + return Http::FilterHeadersStatus::StopIteration; + } + + if (!arn_) { arn_ = parseArn(settings_.arn()); if (!arn_.has_value()) { - ENVOY_LOG(error, "Unable to parse Lambda ARN {}.", settings_.arn()); + ENVOY_LOG(error, "Failed to parse Lambda ARN {}.", settings_.arn()); skip_ = true; - return Http::FilterHeadersStatus::Continue; + decoder_callbacks_->sendLocalReply(Http::Code::BadRequest, "Invalid AWS Lambda ARN", + nullptr /*modify_headers*/, absl::nullopt /*grpc_status*/, + "" /*details*/); + return Http::FilterHeadersStatus::StopIteration; } } - if (end_stream) { - setHeaders(headers, arn_->functionName()); + if (!end_stream) { + request_headers_ = &headers; + return Http::FilterHeadersStatus::StopIteration; + } + + if (payload_passthrough_) { + setLambdaHeaders(headers, arn_->functionName(), invocation_mode_); sigv4_signer_->sign(headers); return Http::FilterHeadersStatus::Continue; } - headers_ = &headers; + Buffer::OwnedImpl json_buf; + jsonizeRequest(headers, nullptr, json_buf); + // We must call 
setLambdaHeaders *after* the JSON transformation of the request. That way we + // reflect the actual incoming request headers instead of the overwritten ones. + setLambdaHeaders(headers, arn_->functionName(), invocation_mode_); + headers.setContentLength(json_buf.length()); + headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + auto& hashing_util = Envoy::Common::Crypto::UtilitySingleton::get(); + const auto hash = Hex::encode(hashing_util.getSha256Digest(json_buf)); + sigv4_signer_->sign(headers, hash); + decoder_callbacks_->addDecodedData(json_buf, false); + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { + if (skip_ || end_stream) { + return Http::FilterHeadersStatus::Continue; + } + + // Check for errors returned by Lambda. + // If we detect an error, we skip the encodeData step to hand the error back to the user as is. + // Errors can be in the form of HTTP status code or x-amz-function-error header + const auto http_status = Http::Utility::getResponseStatus(headers); + if (http_status >= 300) { + skip_ = true; + return Http::FilterHeadersStatus::Continue; + } + + // Just the existence of this header means we have an error, so skip. 
+ if (headers.get(LambdaFilterNames::get().FunctionErrorHeader)) { + skip_ = true; + return Http::FilterHeadersStatus::Continue; + } + + response_headers_ = &headers; return Http::FilterHeadersStatus::StopIteration; } Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) { - UNREFERENCED_PARAMETER(data); if (skip_) { return Http::FilterDataStatus::Continue; } - if (end_stream) { - setHeaders(*headers_, arn_->functionName()); - auto& hashing_util = Envoy::Common::Crypto::UtilitySingleton::get(); - const Buffer::Instance& decoding_buffer = *decoder_callbacks_->decodingBuffer(); - const auto hash = Hex::encode(hashing_util.getSha256Digest(decoding_buffer)); - sigv4_signer_->sign(*headers_, hash); + if (!end_stream) { + return Http::FilterDataStatus::StopIterationAndBuffer; + } + + auto& hashing_util = Envoy::Common::Crypto::UtilitySingleton::get(); + decoder_callbacks_->addDecodedData(data, false); + + const Buffer::Instance& decoding_buffer = *decoder_callbacks_->decodingBuffer(); + + if (!payload_passthrough_) { + decoder_callbacks_->modifyDecodingBuffer([this](Buffer::Instance& dec_buf) { + Buffer::OwnedImpl json_buf; + jsonizeRequest(*request_headers_, &dec_buf, json_buf); + // effectively swap(data, json_buf) + dec_buf.drain(dec_buf.length()); + dec_buf.move(json_buf); + }); + request_headers_->setContentLength(decoding_buffer.length()); + request_headers_->setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + } + + setLambdaHeaders(*request_headers_, arn_->functionName(), invocation_mode_); + const auto hash = Hex::encode(hashing_util.getSha256Digest(decoding_buffer)); + sigv4_signer_->sign(*request_headers_, hash); + return Http::FilterDataStatus::Continue; +} + +Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool end_stream) { + if (skip_ || payload_passthrough_ || invocation_mode_ == InvocationMode::Asynchronous) { return Http::FilterDataStatus::Continue; } - return 
Http::FilterDataStatus::StopIterationAndBuffer; + + if (!end_stream) { + return Http::FilterDataStatus::StopIterationAndBuffer; + } + + ENVOY_LOG(trace, "Tranforming JSON payload to HTTP response."); + if (!encoder_callbacks_->encodingBuffer()) { + encoder_callbacks_->addEncodedData(data, false); + } + const Buffer::Instance& encoding_buffer = *encoder_callbacks_->encodingBuffer(); + encoder_callbacks_->modifyEncodingBuffer([this](Buffer::Instance& enc_buf) { + Buffer::OwnedImpl body; + dejsonizeResponse(*response_headers_, enc_buf, body); + enc_buf.drain(enc_buf.length()); + enc_buf.move(body); + }); + response_headers_->setContentLength(encoding_buffer.length()); + return Http::FilterDataStatus::Continue; +} + +void Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer::Instance* body, + Buffer::Instance& out) const { + using source::extensions::filters::http::aws_lambda::Request; + Request json_req; + if (headers.Path()) { + json_req.set_raw_path(std::string(headers.Path()->value().getStringView())); + } + + if (headers.Method()) { + json_req.set_method(std::string(headers.Method()->value().getStringView())); + } + + // Wrap the headers + headers.iterate( + [](const Http::HeaderEntry& entry, void* ctx) -> Http::HeaderMap::Iterate { + auto* req = static_cast(ctx); + // ignore H2 pseudo-headers + if (absl::StartsWith(entry.key().getStringView(), ":")) { + return Http::HeaderMap::Iterate::Continue; + } + std::string name = std::string(entry.key().getStringView()); + auto it = req->mutable_headers()->find(name); + if (it == req->headers().end()) { + req->mutable_headers()->insert({name, std::string(entry.value().getStringView())}); + } else { + // Coalesce headers with multiple values + it->second += fmt::format(",{}", entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }, + &json_req); + + // Wrap the Query String + for (auto&& kv_pair : Http::Utility::parseQueryString(headers.Path()->value().getStringView())) { + 
json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second}); + } + + // Wrap the body + if (body) { + if (isContentTypeTextual(headers)) { + json_req.set_body(body->toString()); + json_req.set_is_base64_encoded(false); + } else { + json_req.set_body(Base64::encode(*body, body->length())); + json_req.set_is_base64_encoded(true); + } + } + + MessageUtil::validate(json_req, ProtobufMessage::getStrictValidationVisitor()); + const std::string json_data = MessageUtil::getJsonStringFromMessage( + json_req, false /* pretty_print */, true /* always_print_primitive_fields */); + out.add(json_data); +} + +void Filter::dejsonizeResponse(Http::ResponseHeaderMap& headers, const Buffer::Instance& json_buf, + Buffer::Instance& body) const { + using source::extensions::filters::http::aws_lambda::Response; + Response json_resp; + try { + MessageUtil::loadFromJson(json_buf.toString(), json_resp, + ProtobufMessage::getNullValidationVisitor()); + } catch (EnvoyException& ex) { + // We would only get here if all of the following are true: + // 1- Passthrough is set to false + // 2- Lambda returned a 200 OK + // 3- There was no x-amz-function-error header + // 4- The body contains invalid JSON + headers.setStatus(static_cast(Http::Code::InternalServerError)); + // TODO(marcomagdy): Replace the following log with a stat instead + ENVOY_LOG(debug, "Failed to parse JSON response from AWS Lambda.\n{}", ex.what()); + return; + } + + for (auto&& kv : json_resp.headers()) { + // ignore H2 pseudo-headers (if any) + if (kv.first[0] == ':') { + continue; + } + headers.setCopy(Http::LowerCaseString(kv.first), kv.second); + } + + for (auto&& cookie : json_resp.cookies()) { + headers.addReferenceKey(Http::Headers::get().SetCookie, cookie); + } + + if (json_resp.status_code() != 0) { + headers.setStatus(json_resp.status_code()); + } + headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + if (!json_resp.body().empty()) { + if 
(json_resp.is_base64_encoded()) { + body.add(Base64::decode(json_resp.body())); + } else { + body.add(json_resp.body()); + } + } } absl::optional parseArn(absl::string_view arn) { diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h index c59016480bd0..39b7f4c56ab0 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h @@ -4,6 +4,8 @@ #include "envoy/http/filter.h" +#include "common/buffer/buffer_impl.h" + #include "extensions/common/aws/signer.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -51,16 +53,25 @@ class Arn { */ absl::optional parseArn(absl::string_view arn); +/** + * Lambda invocation mode. + * Synchronous mode is analogous to a blocking call; Lambda responds when it's completed processing. + * In the Asynchronous mode, Lambda responds immediately acknowledging it received the request. 
+ */ +enum class InvocationMode { Synchronous, Asynchronous }; + class FilterSettings : public Router::RouteSpecificFilterConfig { public: - FilterSettings(const std::string& arn, bool payload_passthrough) - : arn_(arn), payload_passthrough_(payload_passthrough) {} + FilterSettings(const std::string& arn, InvocationMode mode, bool payload_passthrough) + : arn_(arn), invocation_mode_(mode), payload_passthrough_(payload_passthrough) {} const std::string& arn() const { return arn_; } bool payloadPassthrough() const { return payload_passthrough_; } + InvocationMode invocationMode() const { return invocation_mode_; } private: std::string arn_; + InvocationMode invocation_mode_; bool payload_passthrough_; }; @@ -70,19 +81,39 @@ class Filter : public Http::PassThroughFilter, Logger::Loggable& sigv4_signer); - Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, - bool end_stream) override; + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override; Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override; -private: + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override; + Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override; + /** - * Calculates the route specific Lambda ARN if any. + * Calculates the function ARN, value of pass-through, etc. by checking per-filter configurations + * and general filter configuration. Ultimately, the most specific configuration wins. + * @return error message if settings are invalid. Otherwise, empty string. */ - absl::optional calculateRouteArn(); + std::string resolveSettings(); + + /** + * Used for unit testing only + */ + const FilterSettings& settingsForTest() const { return settings_; } + +private: + absl::optional getRouteSpecificSettings() const; + // Convert the HTTP request to JSON request. 
+ void jsonizeRequest(const Http::RequestHeaderMap& headers, const Buffer::Instance* body, + Buffer::Instance& out) const; + // Convert the JSON response to a standard HTTP response. + void dejsonizeResponse(Http::ResponseHeaderMap& headers, const Buffer::Instance& body, + Buffer::Instance& out) const; const FilterSettings settings_; - Http::RequestHeaderMap* headers_ = nullptr; + Http::RequestHeaderMap* request_headers_ = nullptr; + Http::ResponseHeaderMap* response_headers_ = nullptr; std::shared_ptr sigv4_signer_; absl::optional arn_; + InvocationMode invocation_mode_ = InvocationMode::Synchronous; + bool payload_passthrough_ = false; bool skip_ = false; }; diff --git a/source/extensions/filters/http/aws_lambda/config.cc b/source/extensions/filters/http/aws_lambda/config.cc index 058fe9f1bede..fef53db4a776 100644 --- a/source/extensions/filters/http/aws_lambda/config.cc +++ b/source/extensions/filters/http/aws_lambda/config.cc @@ -23,6 +23,21 @@ std::string extractRegionFromArn(absl::string_view arn) { } throw EnvoyException(fmt::format("Invalid ARN: {}", arn)); } + +InvocationMode +getInvocationMode(const envoy::extensions::filters::http::aws_lambda::v3::Config& proto_config) { + using namespace envoy::extensions::filters::http::aws_lambda::v3; + switch (proto_config.invocation_mode()) { + case Config_InvocationMode_ASYNCHRONOUS: + return InvocationMode::Asynchronous; + break; + case Config_InvocationMode_SYNCHRONOUS: + return InvocationMode::Synchronous; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} } // namespace Http::FilterFactoryCb AwsLambdaFilterFactory::createFilterFactoryFromProtoTyped( @@ -37,7 +52,8 @@ Http::FilterFactoryCb AwsLambdaFilterFactory::createFilterFactoryFromProtoTyped( auto signer = std::make_shared( service_name, region, std::move(credentials_provider), context.dispatcher().timeSource()); - FilterSettings filter_settings{proto_config.arn(), proto_config.payload_passthrough()}; + FilterSettings 
filter_settings{proto_config.arn(), getInvocationMode(proto_config), + proto_config.payload_passthrough()}; return [signer, filter_settings](Http::FilterChainFactoryCallbacks& cb) { auto filter = std::make_shared(filter_settings, signer); @@ -50,7 +66,8 @@ AwsLambdaFilterFactory::createRouteSpecificFilterConfigTyped( const envoy::extensions::filters::http::aws_lambda::v3::PerRouteConfig& proto_config, Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) { return std::make_shared(FilterSettings{ - proto_config.invoke_config().arn(), proto_config.invoke_config().payload_passthrough()}); + proto_config.invoke_config().arn(), getInvocationMode(proto_config.invoke_config()), + proto_config.invoke_config().payload_passthrough()}); } /* * Static registration for the AWS Lambda filter. @see RegisterFactory. diff --git a/source/extensions/filters/http/aws_lambda/request_response.proto b/source/extensions/filters/http/aws_lambda/request_response.proto new file mode 100644 index 000000000000..69fb23b4aa9c --- /dev/null +++ b/source/extensions/filters/http/aws_lambda/request_response.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +// The structures are used for the purpose of JSON (de)serialization. +package source.extensions.filters.http.aws_lambda; + +import "validate/validate.proto"; + +message Request { + string raw_path = 1 [(validate.rules).string = {min_len: 1}]; + + string method = 2 [(validate.rules).string = {min_len: 1}]; + // HTTP headers with the same name are coalesced into a single comma-separated value. + map headers = 3; + + // multi-value keys are overwritten. Last one wins. + map query_string_parameters = 4; + + string body = 5; + + bool is_base64_encoded = 6; +} + +message Response { + uint32 status_code = 1; + map headers = 2; + // cookies are split from headers in the response because the headers are coalesced while the HTTP RFC prohibits + // coalescing multiple cookie values in the Set-Cookie header. 
+ repeated string cookies = 3; + string body = 4; + bool is_base64_encoded = 5; +} diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 5c82eeecf13a..05e07a84fd7c 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -311,7 +311,7 @@ class HttpCacheFactory : public Config::TypedFactory { // as the calling CacheFilter). virtual HttpCache& getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config) PURE; - virtual ~HttpCacheFactory() = default; + ~HttpCacheFactory() override = default; private: const std::string name_; diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index f417b890b5e2..5242ee31f419 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -68,13 +68,8 @@ Http::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap accept_encoding_ = std::make_unique(accept_encoding->value().getStringView()); } - if (config_->enabled()) { - skip_compression_ = false; - if (config_->removeAcceptEncodingHeader()) { - headers.removeAcceptEncoding(); - } - } else { - config_->stats().not_compressed_.inc(); + if (config_->enabled() && config_->removeAcceptEncodingHeader()) { + headers.removeAcceptEncoding(); } return Http::FilterHeadersStatus::Continue; @@ -105,10 +100,11 @@ void CompressorFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallba Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (!end_stream && !skip_compression_ && isMinimumContentLength(headers) && + if (!end_stream && config_->enabled() && isMinimumContentLength(headers) && isAcceptEncodingAllowed(headers) && isContentTypeAllowed(headers) && 
!hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && isTransferEncodingAllowed(headers) && !headers.ContentEncoding()) { + skip_compression_ = false; sanitizeEtagHeader(headers); insertVaryHeader(headers); headers.removeContentLength(); @@ -116,8 +112,7 @@ Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMa config_->stats().compressed_.inc(); // Finally instantiate the compressor. compressor_ = config_->makeCompressor(); - } else if (!skip_compression_) { - skip_compression_ = true; + } else { config_->stats().not_compressed_.inc(); } return Http::FilterHeadersStatus::Continue; diff --git a/source/extensions/filters/http/common/jwks_fetcher.cc b/source/extensions/filters/http/common/jwks_fetcher.cc index 90bf7cf61733..3406879727c7 100644 --- a/source/extensions/filters/http/common/jwks_fetcher.cc +++ b/source/extensions/filters/http/common/jwks_fetcher.cc @@ -63,7 +63,7 @@ class JwksFetcherImpl : public JwksFetcher, } // HTTP async receive methods - void onSuccess(Http::ResponseMessagePtr&& response) override { + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override { ENVOY_LOG(trace, "{}", __func__); complete_ = true; const uint64_t status_code = Http::Utility::getResponseStatus(response->headers()); @@ -93,7 +93,8 @@ class JwksFetcherImpl : public JwksFetcher, reset(); } - void onFailure(Http::AsyncClient::FailureReason reason) override { + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override { ENVOY_LOG(debug, "{}: fetch pubkey [uri = {}]: network error {}", __func__, uri_->uri(), enumToInt(reason)); complete_ = true; diff --git a/source/extensions/filters/http/dynamo/dynamo_filter.cc b/source/extensions/filters/http/dynamo/dynamo_filter.cc index 088fa5c016f7..4be293580b8b 100644 --- a/source/extensions/filters/http/dynamo/dynamo_filter.cc +++ b/source/extensions/filters/http/dynamo/dynamo_filter.cc @@ -137,19 +137,14 @@ 
Http::FilterTrailersStatus DynamoFilter::encodeTrailers(Http::ResponseTrailerMap std::string DynamoFilter::buildBody(const Buffer::Instance* buffered, const Buffer::Instance& last) { std::string body; + body.reserve((buffered ? buffered->length() : 0) + last.length()); if (buffered) { - uint64_t num_slices = buffered->getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffered->getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : buffered->getRawSlices()) { body.append(static_cast(slice.mem_), slice.len_); } } - uint64_t num_slices = last.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - last.getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : last.getRawSlices()) { body.append(static_cast(slice.mem_), slice.len_); } diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 33bf64354c40..e3f754e07841 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -31,28 +31,12 @@ void FilterConfigPerRoute::merge(const FilterConfigPerRoute& other) { } } -void Filter::initiateCall(const Http::RequestHeaderMap& headers) { +void Filter::initiateCall(const Http::RequestHeaderMap& headers, + const Router::RouteConstSharedPtr& route) { if (filter_return_ == FilterReturn::StopDecoding) { return; } - Router::RouteConstSharedPtr route = callbacks_->route(); - if (route == nullptr || route->routeEntry() == nullptr) { - return; - } - cluster_ = callbacks_->clusterInfo(); - - // Fast route - if we are disabled, no need to merge. 
- const auto* specific_per_route_config = - Http::Utility::resolveMostSpecificPerFilterConfig( - HttpFilterNames::get().ExtAuthorization, route); - if (specific_per_route_config != nullptr) { - if (specific_per_route_config->disabled()) { - return; - } - } - - // We are not disabled - get a merged view of the config: auto&& maybe_merged_per_route_config = Http::Utility::getMergedPerFilterConfig( HttpFilterNames::get().ExtAuthorization, route, @@ -83,13 +67,17 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { state_ = State::Calling; filter_return_ = FilterReturn::StopDecoding; // Don't let the filter chain continue as we are // going to invoke check call. + cluster_ = callbacks_->clusterInfo(); initiating_call_ = true; - client_->check(*this, check_request_, callbacks_->activeSpan()); + client_->check(*this, check_request_, callbacks_->activeSpan(), callbacks_->streamInfo()); initiating_call_ = false; } Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { - if (!config_->filterEnabled()) { + Router::RouteConstSharedPtr route = callbacks_->route(); + skip_check_ = skipCheckForRoute(route); + + if (!config_->filterEnabled() || skip_check_) { return Http::FilterHeadersStatus::Continue; } @@ -105,14 +93,15 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } - initiateCall(headers); + // Initiate a call to the authorization server since we are not disabled. + initiateCall(headers, route); return filter_return_ == FilterReturn::StopDecoding ? 
Http::FilterHeadersStatus::StopAllIterationAndWatermark : Http::FilterHeadersStatus::Continue; } Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) { - if (buffer_data_) { + if (buffer_data_ && !skip_check_) { const bool buffer_is_full = isBufferFull(); if (end_stream || buffer_is_full) { ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request since {}", @@ -121,7 +110,7 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea // Make sure data is available in initiateCall. callbacks_->addDecodedData(data, true); } - initiateCall(*request_headers_); + initiateCall(*request_headers_, callbacks_->route()); return filter_return_ == FilterReturn::StopDecoding ? Http::FilterDataStatus::StopIterationAndWatermark : Http::FilterDataStatus::Continue; @@ -134,10 +123,10 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea } Http::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap&) { - if (buffer_data_) { + if (buffer_data_ && !skip_check_) { if (filter_return_ != FilterReturn::StopDecoding) { ENVOY_STREAM_LOG(debug, "ext_authz filter finished buffering the request", *callbacks_); - initiateCall(*request_headers_); + initiateCall(*request_headers_, callbacks_->route()); } return filter_return_ == FilterReturn::StopDecoding ? 
Http::FilterTrailersStatus::StopIteration : Http::FilterTrailersStatus::Continue; @@ -263,7 +252,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } } -bool Filter::isBufferFull() { +bool Filter::isBufferFull() const { const auto* buffer = callbacks_->decodingBuffer(); if (config_->allowPartialMessage() && buffer != nullptr) { return buffer->length() >= config_->maxRequestBytes(); @@ -278,6 +267,21 @@ void Filter::continueDecoding() { } } +bool Filter::skipCheckForRoute(const Router::RouteConstSharedPtr& route) const { + if (route == nullptr || route->routeEntry() == nullptr) { + return true; + } + + const auto* specific_per_route_config = + Http::Utility::resolveMostSpecificPerFilterConfig( + HttpFilterNames::get().ExtAuthorization, route); + if (specific_per_route_config != nullptr) { + return specific_per_route_config->disabled(); + } + + return false; +} + } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 1d7697f25338..56ce8b5a3074 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -220,9 +220,11 @@ class Filter : public Logger::Loggable, private: void addResponseHeaders(Http::HeaderMap& header_map, const Http::HeaderVector& headers); - void initiateCall(const Http::RequestHeaderMap& headers); + void initiateCall(const Http::RequestHeaderMap& headers, + const Router::RouteConstSharedPtr& route); void continueDecoding(); - bool isBufferFull(); + bool isBufferFull() const; + bool skipCheckForRoute(const Router::RouteConstSharedPtr& route) const; // State of this filter's communication with the external authorization service. 
// The filter has either not started calling the external service, in the middle of calling @@ -248,6 +250,7 @@ class Filter : public Logger::Loggable, // Used to identify if the callback to onComplete() is synchronous (on the stack) or asynchronous. bool initiating_call_{}; bool buffer_data_{}; + bool skip_check_{false}; envoy::service::auth::v3::CheckRequest check_request_{}; }; diff --git a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc index b6c77fd5422d..4dc8b216ffe9 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc +++ b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc @@ -19,7 +19,7 @@ namespace HttpFilters { namespace GrpcHttp1Bridge { void Http1BridgeFilter::chargeStat(const Http::ResponseHeaderOrTrailerMap& headers) { - context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, *request_names_, + context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, *request_stat_names_, headers.GrpcStatus()); } @@ -101,7 +101,7 @@ void Http1BridgeFilter::setupStatTracking(const Http::RequestHeaderMap& headers) if (!cluster_) { return; } - request_names_ = context_.resolveServiceAndMethod(headers.Path()); + request_stat_names_ = context_.resolveDynamicServiceAndMethod(headers.Path()); } } // namespace GrpcHttp1Bridge diff --git a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h index 5dda6604a24d..991458360fe9 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h +++ b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.h @@ -47,7 +47,7 @@ class Http1BridgeFilter : public Http::StreamFilter { encoder_callbacks_ = &callbacks; } - bool doStatTracking() const { return request_names_.has_value(); } + bool doStatTracking() const { return request_stat_names_.has_value(); } 
private: void chargeStat(const Http::ResponseHeaderOrTrailerMap& headers); @@ -58,7 +58,7 @@ class Http1BridgeFilter : public Http::StreamFilter { Http::ResponseHeaderMap* response_headers_{}; bool do_bridging_{}; Upstream::ClusterInfoConstSharedPtr cluster_; - absl::optional request_names_; + absl::optional request_stat_names_; Grpc::Context& context_; }; diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index f194bb66ca60..ae54456db528 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -26,8 +26,6 @@ #include "grpc_transcoding/response_to_json_translator.h" using Envoy::Protobuf::FileDescriptorSet; -using Envoy::Protobuf::io::CodedOutputStream; -using Envoy::Protobuf::io::StringOutputStream; using Envoy::Protobuf::io::ZeroCopyInputStream; using Envoy::ProtobufUtil::Status; using Envoy::ProtobufUtil::error::Code; diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index 354b0b010230..15ce11054706 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -47,7 +47,7 @@ struct MethodInfo { bool request_type_is_http_body_ = false; bool response_type_is_http_body_ = false; }; -typedef std::shared_ptr MethodInfoSharedPtr; +using MethodInfoSharedPtr = std::shared_ptr; void createHttpBodyEnvelope(Buffer::Instance& output, const std::vector& request_body_field_path, diff --git a/source/extensions/filters/http/grpc_stats/BUILD b/source/extensions/filters/http/grpc_stats/BUILD index 9b2eb79247b7..171e49afd320 100644 --- a/source/extensions/filters/http/grpc_stats/BUILD +++ 
b/source/extensions/filters/http/grpc_stats/BUILD @@ -23,6 +23,7 @@ envoy_cc_extension( "//source/common/grpc:codec_lib", "//source/common/grpc:common_lib", "//source/common/grpc:context_lib", + "//source/common/runtime:runtime_lib", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/common:factory_base_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc index 79f4477332db..a67f17d62cf5 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc @@ -7,6 +7,8 @@ #include "common/grpc/codec.h" #include "common/grpc/common.h" #include "common/grpc/context_impl.h" +#include "common/runtime/runtime_impl.h" +#include "common/stats/symbol_table_impl.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -17,66 +19,221 @@ namespace GrpcStats { namespace { +// A map from gRPC service/method name to symbolized stat names for the service/method. +// +// The expected usage pattern is that the map is populated once, and can then be queried lock-free +// as long as it isn't being modified. +class GrpcServiceMethodToRequestNamesMap { +public: +public: + // Construct a map populated with the services/methods in method_list. 
+ GrpcServiceMethodToRequestNamesMap(Stats::SymbolTable& symbol_table, + const envoy::config::core::v3::GrpcMethodList& method_list) + : stat_name_pool_(symbol_table), map_(populate(method_list)) {} + + absl::optional + lookup(const Grpc::Common::RequestNames& request_names) const { + auto it = map_.find(request_names); + if (it != map_.end()) { + return it->second; + } + + return {}; + } + +private: + using OwningKey = std::tuple; + using ViewKey = Grpc::Common::RequestNames; + + class MapHash { + private: + // Use the same type for hashing all variations to ensure the same hash value from all source + // types. + using ViewTuple = std::tuple; + static uint64_t hash(const ViewTuple& key) { return absl::Hash()(key); } + + public: + using is_transparent = void; + + uint64_t operator()(const OwningKey& key) const { return hash(key); } + uint64_t operator()(const ViewKey& key) const { + return hash(ViewTuple(key.service_, key.method_)); + } + }; + + struct MapEq { + using is_transparent = void; + bool operator()(const OwningKey& left, const OwningKey& right) const { return left == right; } + bool operator()(const OwningKey& left, const ViewKey& right) const { + return left == std::make_tuple(right.service_, right.method_); + } + }; + using MapType = absl::flat_hash_map; + + // Helper for generating a populated MapType so that `map_` can be const. 
+ MapType populate(const envoy::config::core::v3::GrpcMethodList& method_list) { + MapType map; + for (const auto& service : method_list.services()) { + Stats::StatName stat_name_service = stat_name_pool_.add(service.name()); + + for (const auto& method_name : service.method_names()) { + Stats::StatName stat_name_method = stat_name_pool_.add(method_name); + map[OwningKey(service.name(), method_name)] = + Grpc::Context::RequestStatNames{stat_name_service, stat_name_method}; + } + } + return map; + } + + Stats::StatNamePool stat_name_pool_; + const MapType map_; +}; + +struct Config { + Config(const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& proto_config, + Server::Configuration::FactoryContext& context) + : context_(context.grpcContext()), emit_filter_state_(proto_config.emit_filter_state()) { + + switch (proto_config.per_method_stat_specifier_case()) { + case envoy::extensions::filters::http::grpc_stats::v3::FilterConfig:: + PER_METHOD_STAT_SPECIFIER_NOT_SET: + case envoy::extensions::filters::http::grpc_stats::v3::FilterConfig::kStatsForAllMethods: + if (proto_config.has_stats_for_all_methods()) { + stats_for_all_methods_ = proto_config.stats_for_all_methods().value(); + } else { + // Default for when "grpc_stats_filter_enable_stats_for_all_methods_by_default" isn't + // set. + // + // This will flip to false after one release. + const bool runtime_feature_default = true; + + const char* runtime_key = "envoy.deprecated_features.grpc_stats_filter_enable_" + "stats_for_all_methods_by_default"; + + stats_for_all_methods_ = context.runtime().snapshot().deprecatedFeatureEnabled( + runtime_key, runtime_feature_default); + + if (stats_for_all_methods_) { + ENVOY_LOG_MISC(warn, + "Using deprecated default value for " + "'envoy.extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_" + "methods'. The default for this field will become false in a future " + "release. To retain this behavior, set this field to true in your " + "configuration. 
A short-term workaround of setting runtime configuration " + "{} to true can be used if the configuration cannot be changed.", + runtime_key); + } + } + break; + + case envoy::extensions::filters::http::grpc_stats::v3::FilterConfig:: + kIndividualMethodStatsAllowlist: + allowlist_.emplace(context.scope().symbolTable(), + proto_config.individual_method_stats_allowlist()); + break; + } + } + Grpc::Context& context_; + bool emit_filter_state_; + bool stats_for_all_methods_{false}; + absl::optional allowlist_; +}; +using ConfigConstSharedPtr = std::shared_ptr; + class GrpcStatsFilter : public Http::PassThroughFilter { public: - explicit GrpcStatsFilter(Grpc::Context& context, bool emit_filter_state) - : context_(context), emit_filter_state_(emit_filter_state) {} + GrpcStatsFilter(ConfigConstSharedPtr config) : config_(config) {} Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { grpc_request_ = Grpc::Common::hasGrpcContentType(headers); if (grpc_request_) { cluster_ = decoder_callbacks_->clusterInfo(); if (cluster_) { - request_names_ = context_.resolveServiceAndMethod(headers.Path()); + if (config_->stats_for_all_methods_) { + // Get dynamically-allocated Context::RequestStatNames from the context. + request_names_ = config_->context_.resolveDynamicServiceAndMethod(headers.Path()); + do_stat_tracking_ = request_names_.has_value(); + } else { + // This case handles both proto_config.stats_for_all_methods() == false, + // and proto_config.has_individual_method_stats_allowlist(). This works + // because proto_config.stats_for_all_methods() == false results in + // an empty allowlist, which exactly matches the behavior specified for + // this configuration. + // + // Resolve the service and method to a string_view, then get + // the Context::RequestStatNames out of the pre-allocated list that + // can be produced with the allowlist being present. 
+ absl::optional request_names = + Grpc::Common::resolveServiceAndMethod(headers.Path()); + + if (request_names) { + // Do stat tracking as long as this looks like a grpc service/method, + // even if it isn't in the allowlist. Things not in the allowlist + // are counted with a stat with no service/method in the name. + do_stat_tracking_ = true; + + // If the entry is not found in the allowlist, this will return + // an empty optional; each of the `charge` functions on the context + // will interpret an empty optional for this value to mean that the + // service.method prefix on the stat should be omitted. + if (config_->allowlist_) { + request_names_ = config_->allowlist_->lookup(*request_names); + } + } + } } } return Http::FilterHeadersStatus::Continue; } + Http::FilterDataStatus decodeData(Buffer::Instance& data, bool) override { if (grpc_request_) { uint64_t delta = request_counter_.inspect(data); if (delta > 0) { maybeWriteFilterState(); if (doStatTracking()) { - context_.chargeRequestMessageStat(*cluster_, *request_names_, delta); + config_->context_.chargeRequestMessageStat(*cluster_, request_names_, delta); } } } return Http::FilterDataStatus::Continue; } + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override { grpc_response_ = Grpc::Common::isGrpcResponseHeader(headers, end_stream); if (doStatTracking()) { - context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, *request_names_, - headers.GrpcStatus()); + config_->context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, request_names_, + headers.GrpcStatus()); } return Http::FilterHeadersStatus::Continue; } + Http::FilterDataStatus encodeData(Buffer::Instance& data, bool) override { if (grpc_response_) { uint64_t delta = response_counter_.inspect(data); if (delta > 0) { maybeWriteFilterState(); if (doStatTracking()) { - context_.chargeResponseMessageStat(*cluster_, *request_names_, delta); + config_->context_.chargeResponseMessageStat(*cluster_, 
request_names_, delta); } } } return Http::FilterDataStatus::Continue; } + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override { if (doStatTracking()) { - context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, *request_names_, - trailers.GrpcStatus()); + config_->context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, request_names_, + trailers.GrpcStatus()); } return Http::FilterTrailersStatus::Continue; } - bool doStatTracking() const { return request_names_.has_value(); } + bool doStatTracking() const { return do_stat_tracking_; } void maybeWriteFilterState() { - if (!emit_filter_state_) { + if (!config_->emit_filter_state_) { return; } if (filter_object_ == nullptr) { @@ -92,33 +249,34 @@ class GrpcStatsFilter : public Http::PassThroughFilter { } private: - Grpc::Context& context_; - const bool emit_filter_state_; + ConfigConstSharedPtr config_; GrpcStatsObject* filter_object_{}; + bool do_stat_tracking_{false}; bool grpc_request_{false}; bool grpc_response_{false}; Grpc::FrameInspector request_counter_; Grpc::FrameInspector response_counter_; Upstream::ClusterInfoConstSharedPtr cluster_; - absl::optional request_names_; -}; + absl::optional request_names_; +}; // namespace } // namespace -Http::FilterFactoryCb GrpcStatsFilterConfig::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& config, +Http::FilterFactoryCb GrpcStatsFilterConfigFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext& factory_context) { - return [&factory_context, emit_filter_state = config.emit_filter_state()]( - Http::FilterChainFactoryCallbacks& callbacks) { - callbacks.addStreamFilter( - std::make_shared(factory_context.grpcContext(), emit_filter_state)); + + ConfigConstSharedPtr config = std::make_shared(proto_config, factory_context); + + return 
[config](Http::FilterChainFactoryCallbacks& callbacks) { + callbacks.addStreamFilter(std::make_shared(config)); }; } /** * Static registration for the gRPC stats filter. @see RegisterFactory. */ -REGISTER_FACTORY(GrpcStatsFilterConfig, Server::Configuration::NamedHttpFilterConfigFactory); +REGISTER_FACTORY(GrpcStatsFilterConfigFactory, Server::Configuration::NamedHttpFilterConfigFactory); } // namespace GrpcStats } // namespace HttpFilters diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h index cc3b1df1a62a..6afb1861aece 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.h @@ -26,14 +26,14 @@ struct GrpcStatsObject : public StreamInfo::FilterState::Object { } }; -class GrpcStatsFilterConfig +class GrpcStatsFilterConfigFactory : public Common::FactoryBase { public: - GrpcStatsFilterConfig() : FactoryBase(HttpFilterNames::get().GrpcStats) {} + GrpcStatsFilterConfigFactory() : FactoryBase(HttpFilterNames::get().GrpcStats) {} private: Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& config, + const envoy::extensions::filters::http::grpc_stats::v3::FilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext&) override; }; diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index 2c64d5d5fe9c..7ead4cd30d55 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -232,11 +232,11 @@ void GrpcWebFilter::setupStatTracking(const Http::RequestHeaderMap& headers) { if (!cluster_) { return; } - request_names_ = context_.resolveServiceAndMethod(headers.Path()); + request_stat_names_ = 
context_.resolveDynamicServiceAndMethod(headers.Path()); } void GrpcWebFilter::chargeStat(const Http::ResponseHeaderOrTrailerMap& headers) { - context_.chargeStat(*cluster_, Grpc::Context::Protocol::GrpcWeb, *request_names_, + context_.chargeStat(*cluster_, Grpc::Context::Protocol::GrpcWeb, *request_stat_names_, headers.GrpcStatus()); } diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.h b/source/extensions/filters/http/grpc_web/grpc_web_filter.h index de0490d66462..7dfd54d51f48 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.h +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.h @@ -50,7 +50,7 @@ class GrpcWebFilter : public Http::StreamFilter, NonCopyable { encoder_callbacks_ = &callbacks; } - bool doStatTracking() const { return request_names_.has_value(); } + bool doStatTracking() const { return request_stat_names_.has_value(); } private: friend class GrpcWebFilterTest; @@ -69,7 +69,7 @@ class GrpcWebFilter : public Http::StreamFilter, NonCopyable { bool is_text_response_{}; Buffer::OwnedImpl decoding_buffer_; Grpc::Decoder decoder_; - absl::optional request_names_; + absl::optional request_stat_names_; bool is_grpc_web_request_{}; Grpc::Context& context_; }; diff --git a/source/extensions/filters/http/health_check/config.cc b/source/extensions/filters/http/health_check/config.cc index ac388fcbdb0e..7baa31b01daa 100644 --- a/source/extensions/filters/http/health_check/config.cc +++ b/source/extensions/filters/http/health_check/config.cc @@ -1,5 +1,7 @@ #include "extensions/filters/http/health_check/config.h" +#include + #include "envoy/extensions/filters/http/health_check/v3/health_check.pb.h" #include "envoy/extensions/filters/http/health_check/v3/health_check.pb.validate.h" #include "envoy/registry/registry.h" @@ -31,8 +33,8 @@ Http::FilterFactoryCb HealthCheckFilterConfig::createFilterFactoryFromProtoTyped HealthCheckCacheManagerSharedPtr cache_manager; if (cache_time_ms > 0) { - cache_manager.reset(new 
HealthCheckCacheManager(context.dispatcher(), - std::chrono::milliseconds(cache_time_ms))); + cache_manager = std::make_shared( + context.dispatcher(), std::chrono::milliseconds(cache_time_ms)); } ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages; diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index 140309d6fe38..3837b2c4c034 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -101,12 +101,15 @@ class AuthenticatorImpl : public Logger::Loggable, }; std::string AuthenticatorImpl::name() const { - if (provider_) + if (provider_) { return provider_.value() + (is_allow_missing_ ? "-OPTIONAL" : ""); - if (is_allow_failed_) + } + if (is_allow_failed_) { return "_IS_ALLOW_FALED_"; - if (is_allow_missing_) + } + if (is_allow_missing_) { return "_IS_ALLOW_MISSING_"; + } return "_UNKNOWN_"; } diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 261901b1484e..fb1ebb21a091 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -11,7 +11,6 @@ #include "absl/strings/match.h" -using envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication; using envoy::extensions::filters::http::jwt_authn::v3::JwtProvider; using Envoy::Http::LowerCaseString; diff --git a/source/extensions/filters/http/jwt_authn/filter_config.h b/source/extensions/filters/http/jwt_authn/filter_config.h index c65670b85976..53c7cc965be3 100644 --- a/source/extensions/filters/http/jwt_authn/filter_config.h +++ b/source/extensions/filters/http/jwt_authn/filter_config.h @@ -104,8 +104,8 @@ class FilterConfigImpl : public Logger::Loggable, bool bypassCorsPreflightRequest() const override { return proto_config_.bypass_cors_preflight(); } - virtual const Verifier* 
findVerifier(const Http::RequestHeaderMap& headers, - const StreamInfo::FilterState& filter_state) const override { + const Verifier* findVerifier(const Http::RequestHeaderMap& headers, + const StreamInfo::FilterState& filter_state) const override { for (const auto& pair : rule_pairs_) { if (pair.matcher_->matches(headers)) { return pair.verifier_.get(); diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 2d94234b7a45..fc935fa8c33b 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -295,7 +295,8 @@ int StreamHandleWrapper::luaHttpCallAsynchronous(lua_State* state) { return 0; } -void StreamHandleWrapper::onSuccess(Http::ResponseMessagePtr&& response) { +void StreamHandleWrapper::onSuccess(const Http::AsyncClient::Request&, + Http::ResponseMessagePtr&& response) { ASSERT(state_ == State::HttpCall || state_ == State::Running); ENVOY_LOG(debug, "async HTTP response complete"); http_request_ = nullptr; @@ -341,7 +342,8 @@ void StreamHandleWrapper::onSuccess(Http::ResponseMessagePtr&& response) { } } -void StreamHandleWrapper::onFailure(Http::AsyncClient::FailureReason) { +void StreamHandleWrapper::onFailure(const Http::AsyncClient::Request& request, + Http::AsyncClient::FailureReason) { ASSERT(state_ == State::HttpCall || state_ == State::Running); ENVOY_LOG(debug, "async HTTP failure"); @@ -351,7 +353,7 @@ void StreamHandleWrapper::onFailure(Http::AsyncClient::FailureReason) { {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::ServiceUnavailable))}}))); response_message->body() = std::make_unique("upstream failure"); - onSuccess(std::move(response_message)); + onSuccess(request, std::move(response_message)); } int StreamHandleWrapper::luaHeaders(lua_State* state) { diff --git a/source/extensions/filters/http/lua/lua_filter.h b/source/extensions/filters/http/lua/lua_filter.h index ffab53fc67ab..88725c50ef40 100644 --- 
a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -253,8 +253,8 @@ class StreamHandleWrapper : public Filters::Common::Lua::BaseLuaObjectconnection()->requestedServerName(), callbacks_->connection()->remoteAddress()->asString(), - callbacks_->connection()->localAddress()->asString(), + callbacks_->streamInfo().downstreamDirectRemoteAddress()->asString(), + callbacks_->streamInfo().downstreamRemoteAddress()->asString(), + callbacks_->streamInfo().downstreamLocalAddress()->asString(), callbacks_->connection()->ssl() ? "uriSanPeerCertificate: " + absl::StrJoin(callbacks_->connection()->ssl()->uriSanPeerCertificate(), ",") + diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index 7ad2f95b715b..ca0c8205f558 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -309,14 +309,7 @@ void SquashFilter::cleanup() { Json::ObjectSharedPtr SquashFilter::getJsonBody(Http::ResponseMessagePtr&& m) { Buffer::InstancePtr& data = m->body(); - uint64_t num_slices = data->getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data->getRawSlices(slices.begin(), num_slices); - std::string jsonbody; - for (const Buffer::RawSlice& slice : slices) { - jsonbody += std::string(static_cast(slice.mem_), slice.len_); - } - + std::string jsonbody = data->toString(); return Json::Factory::loadFromString(jsonbody); } diff --git a/source/extensions/filters/http/squash/squash_filter.h b/source/extensions/filters/http/squash/squash_filter.h index 4f07eb875a11..f1b8446a132f 100644 --- a/source/extensions/filters/http/squash/squash_filter.h +++ b/source/extensions/filters/http/squash/squash_filter.h @@ -62,10 +62,12 @@ class AsyncClientCallbackShim : public Http::AsyncClient::Callbacks { std::function&& on_fail) : on_success_(on_success), on_fail_(on_fail) {} // 
Http::AsyncClient::Callbacks - void onSuccess(Http::ResponseMessagePtr&& m) override { + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& m) override { on_success_(std::forward(m)); } - void onFailure(Http::AsyncClient::FailureReason f) override { on_fail_(f); } + void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason f) override { + on_fail_(f); + } private: const std::function on_success_; diff --git a/source/extensions/filters/http/wasm/BUILD b/source/extensions/filters/http/wasm/BUILD index de342c917702..b24588a51f33 100644 --- a/source/extensions/filters/http/wasm/BUILD +++ b/source/extensions/filters/http/wasm/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -24,10 +25,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", deps = [ ":wasm_filter_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/listener/http_inspector/config.cc b/source/extensions/filters/listener/http_inspector/config.cc index 6e4f30ae1646..247e1f20a807 100644 --- a/source/extensions/filters/listener/http_inspector/config.cc +++ b/source/extensions/filters/listener/http_inspector/config.cc @@ -17,13 +17,15 @@ namespace HttpInspector { class HttpInspectorConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory { public: // NamedListenerFilterConfigFactory - Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Server::Configuration::ListenerFactoryContext& context) override { + Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( + const Protobuf::Message&, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Server::Configuration::ListenerFactoryContext& context) override { ConfigSharedPtr 
config(std::make_shared(context.scope())); - return [config](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique(config)); - }; + return + [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void { + filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique(config)); + }; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/extensions/filters/listener/original_dst/config.cc b/source/extensions/filters/listener/original_dst/config.cc index c484c6d8bd59..361b4f80c84d 100644 --- a/source/extensions/filters/listener/original_dst/config.cc +++ b/source/extensions/filters/listener/original_dst/config.cc @@ -19,11 +19,13 @@ namespace OriginalDst { class OriginalDstConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory { public: // NamedListenerFilterConfigFactory - Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Server::Configuration::ListenerFactoryContext&) override { - return [](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique()); + Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( + const Protobuf::Message&, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Server::Configuration::ListenerFactoryContext&) override { + return [listener_filter_matcher](Network::ListenerFilterManager& filter_manager) -> void { + filter_manager.addAcceptFilter(listener_filter_matcher, + std::make_unique()); }; } diff --git a/source/extensions/filters/listener/original_src/original_src_config_factory.cc b/source/extensions/filters/listener/original_src/original_src_config_factory.cc index 77f0d7aad30a..157e1a92efdd 100644 --- a/source/extensions/filters/listener/original_src/original_src_config_factory.cc +++ 
b/source/extensions/filters/listener/original_src/original_src_config_factory.cc @@ -13,14 +13,17 @@ namespace Extensions { namespace ListenerFilters { namespace OriginalSrc { -Network::ListenerFilterFactoryCb OriginalSrcConfigFactory::createFilterFactoryFromProto( - const Protobuf::Message& message, Server::Configuration::ListenerFactoryContext& context) { +Network::ListenerFilterFactoryCb OriginalSrcConfigFactory::createListenerFilterFactoryFromProto( + const Protobuf::Message& message, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Server::Configuration::ListenerFactoryContext& context) { auto proto_config = MessageUtil::downcastAndValidate< const envoy::extensions::filters::listener::original_src::v3::OriginalSrc&>( message, context.messageValidationVisitor()); Config config(proto_config); - return [config](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique(config)); + return [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void { + filter_manager.addAcceptFilter(listener_filter_matcher, + std::make_unique(config)); }; } diff --git a/source/extensions/filters/listener/original_src/original_src_config_factory.h b/source/extensions/filters/listener/original_src/original_src_config_factory.h index ea1a3cbd4ee3..94197935ab07 100644 --- a/source/extensions/filters/listener/original_src/original_src_config_factory.h +++ b/source/extensions/filters/listener/original_src/original_src_config_factory.h @@ -15,9 +15,10 @@ namespace OriginalSrc { class OriginalSrcConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory { public: // NamedListenerFilterConfigFactory - Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message& message, - Server::Configuration::ListenerFactoryContext& context) override; + Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( + const Protobuf::Message& 
message, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Server::Configuration::ListenerFactoryContext& context) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/filters/listener/proxy_protocol/config.cc b/source/extensions/filters/listener/proxy_protocol/config.cc index d50eeb87c541..0fa044542d08 100644 --- a/source/extensions/filters/listener/proxy_protocol/config.cc +++ b/source/extensions/filters/listener/proxy_protocol/config.cc @@ -17,13 +17,15 @@ namespace ProxyProtocol { class ProxyProtocolConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory { public: // NamedListenerFilterConfigFactory - Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Server::Configuration::ListenerFactoryContext& context) override { + Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( + const Protobuf::Message&, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Server::Configuration::ListenerFactoryContext& context) override { ConfigSharedPtr config(new Config(context.scope())); - return [config](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique(config)); - }; + return + [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void { + filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique(config)); + }; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h index 4d3ea34c138e..63c3c96eadf0 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h @@ -11,6 +11,7 @@ namespace ProxyProtocol { // See 
https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt for definitions +// TODO(wez470): Refactor listener filter to use common proxy proto constants constexpr char PROXY_PROTO_V1_SIGNATURE[] = "PROXY "; constexpr uint32_t PROXY_PROTO_V1_SIGNATURE_LEN = 6; constexpr char PROXY_PROTO_V2_SIGNATURE[] = "\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a"; diff --git a/source/extensions/filters/listener/tls_inspector/config.cc b/source/extensions/filters/listener/tls_inspector/config.cc index befd05e75855..058a5ddaf7c5 100644 --- a/source/extensions/filters/listener/tls_inspector/config.cc +++ b/source/extensions/filters/listener/tls_inspector/config.cc @@ -19,13 +19,15 @@ namespace TlsInspector { class TlsInspectorConfigFactory : public Server::Configuration::NamedListenerFilterConfigFactory { public: // NamedListenerFilterConfigFactory - Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Server::Configuration::ListenerFactoryContext& context) override { + Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( + const Protobuf::Message&, + const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Server::Configuration::ListenerFactoryContext& context) override { ConfigSharedPtr config(new Config(context.scope())); - return [config](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique(config)); - }; + return + [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void { + filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique(config)); + }; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/extensions/filters/network/common/redis/codec_impl.cc b/source/extensions/filters/network/common/redis/codec_impl.cc index 2b5e1917686c..a668a7868e26 100644 --- a/source/extensions/filters/network/common/redis/codec_impl.cc +++ 
b/source/extensions/filters/network/common/redis/codec_impl.cc @@ -330,10 +330,7 @@ RespValue::CompositeArray::CompositeArrayConstIterator::empty() { } void DecoderImpl::decode(Buffer::Instance& data) { - uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { parseSlice(slice); } diff --git a/source/extensions/filters/network/direct_response/filter.cc b/source/extensions/filters/network/direct_response/filter.cc index e01266157838..0f0cfe4d329f 100644 --- a/source/extensions/filters/network/direct_response/filter.cc +++ b/source/extensions/filters/network/direct_response/filter.cc @@ -11,13 +11,16 @@ namespace NetworkFilters { namespace DirectResponse { Network::FilterStatus DirectResponseFilter::onNewConnection() { - ENVOY_CONN_LOG(trace, "direct_response: new connection", read_callbacks_->connection()); - if (response_.size() > 0) { + auto& connection = read_callbacks_->connection(); + ENVOY_CONN_LOG(trace, "direct_response: new connection", connection); + if (!response_.empty()) { Buffer::OwnedImpl data(response_); - read_callbacks_->connection().write(data, false); + connection.write(data, false); ASSERT(0 == data.length()); } - read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + connection.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().DirectResponse); + connection.close(Network::ConnectionCloseType::FlushWrite); return Network::FilterStatus::StopIteration; } diff --git a/source/extensions/filters/network/dubbo_proxy/filters/filter_config.h b/source/extensions/filters/network/dubbo_proxy/filters/filter_config.h index 3eb49c6855f3..2138d44bb062 100644 --- a/source/extensions/filters/network/dubbo_proxy/filters/filter_config.h +++ b/source/extensions/filters/network/dubbo_proxy/filters/filter_config.h @@ -23,7 
+23,7 @@ namespace DubboFilters { */ class NamedDubboFilterConfigFactory : public Envoy::Config::TypedFactory { public: - virtual ~NamedDubboFilterConfigFactory() = default; + ~NamedDubboFilterConfigFactory() override = default; /** * Create a particular dubbo filter factory implementation. If the implementation is unable to diff --git a/source/extensions/filters/network/dubbo_proxy/protocol.h b/source/extensions/filters/network/dubbo_proxy/protocol.h index 0c44980d5784..b496699d42c2 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol.h @@ -100,7 +100,7 @@ using ProtocolPtr = std::unique_ptr; */ class NamedProtocolConfigFactory : public Config::UntypedFactory { public: - virtual ~NamedProtocolConfigFactory() = default; + ~NamedProtocolConfigFactory() override = default; /** * Create a particular Dubbo protocol. diff --git a/source/extensions/filters/network/dubbo_proxy/router/route.h b/source/extensions/filters/network/dubbo_proxy/router/route.h index fd7ece10428e..c9814aa18f2a 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route.h +++ b/source/extensions/filters/network/dubbo_proxy/router/route.h @@ -69,7 +69,7 @@ using RouteMatcherConstSharedPtr = std::shared_ptr; */ class NamedRouteMatcherConfigFactory : public Envoy::Config::UntypedFactory { public: - virtual ~NamedRouteMatcherConfigFactory() = default; + ~NamedRouteMatcherConfigFactory() override = default; /** * Create a particular Dubbo protocol. 
diff --git a/source/extensions/filters/network/dubbo_proxy/serializer.h b/source/extensions/filters/network/dubbo_proxy/serializer.h index 3b6c85dc7f18..2d3c1125cb7f 100644 --- a/source/extensions/filters/network/dubbo_proxy/serializer.h +++ b/source/extensions/filters/network/dubbo_proxy/serializer.h @@ -81,7 +81,7 @@ using SerializerPtr = std::unique_ptr; */ class NamedSerializerConfigFactory : public Config::UntypedFactory { public: - virtual ~NamedSerializerConfigFactory() = default; + ~NamedSerializerConfigFactory() override = default; /** * Create a particular Dubbo serializer. diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index cf4a8a4ab189..af9117884608 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -28,7 +28,8 @@ void Filter::callCheck() { config_->stats().total_.inc(); calling_check_ = true; - client_->check(*this, check_request_, Tracing::NullSpan::instance()); + client_->check(*this, check_request_, Tracing::NullSpan::instance(), + filter_callbacks_->connection().streamInfo()); calling_check_ = false; } diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 641d6c6250ec..b7ebe990a80d 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -22,6 +22,7 @@ envoy_cc_extension( "//include/envoy/filesystem:filesystem_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:filter_interface", + "//include/envoy/http:request_id_extension_interface", "//include/envoy/registry", "//include/envoy/router:route_config_provider_manager_interface", "//include/envoy/server:admin_interface", @@ -32,6 +33,7 @@ envoy_cc_extension( "//source/common/config:utility_lib", 
"//source/common/http:conn_manager_lib", "//source/common/http:default_server_string_lib", + "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", "//source/common/http/http1:codec_lib", "//source/common/http/http2:codec_lib", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 546002ef7686..ed107562bccd 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -23,8 +23,11 @@ #include "common/http/http2/codec_impl.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" +#include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" #include "common/protobuf/utility.h" +#include "common/router/rds_impl.h" +#include "common/router/scoped_rds.h" #include "common/runtime/runtime_impl.h" #include "common/tracing/http_tracer_config_impl.h" #include "common/tracing/http_tracer_manager_impl.h" @@ -211,7 +214,9 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( context.runtime().snapshot().featureEnabled("http_connection_manager.normalize_path", 0))), #endif - merge_slashes_(config.merge_slashes()) { + merge_slashes_(config.merge_slashes()), + headers_with_underscores_action_( + config.common_http_protocol_options().headers_with_underscores_action()) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. @@ -224,6 +229,15 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( idle_timeout_ = absl::nullopt; } + // If we are provided a different request_id_extension implementation to use try and create a new + // instance of it, otherwise use default one. 
+ if (config.request_id_extension().has_typed_config()) { + request_id_extension_ = + Http::RequestIDExtensionFactory::fromProto(config.request_id_extension(), context_); + } else { + request_id_extension_ = Http::RequestIDExtensionFactory::defaultInstance(context_.random()); + } + // If scoped RDS is enabled, avoid creating a route config provider. Route config providers will // be managed by the scoped routing logic instead. switch (config.route_specifier_case()) { @@ -461,11 +475,11 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, case CodecType::HTTP1: return std::make_unique( connection, context_.scope(), callbacks, http1_settings_, maxRequestHeadersKb(), - maxRequestHeadersCount()); + maxRequestHeadersCount(), headersWithUnderscoresAction()); case CodecType::HTTP2: return std::make_unique( connection, callbacks, context_.scope(), http2_options_, maxRequestHeadersKb(), - maxRequestHeadersCount()); + maxRequestHeadersCount(), headersWithUnderscoresAction()); case CodecType::HTTP3: // Hard code Quiche factory name here to instantiate a QUIC codec implemented. 
// TODO(danzh) Add support to get the factory name from config, possibly @@ -478,7 +492,7 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec( connection, data, callbacks, context_.scope(), http1_settings_, http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount()); + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index eaf59effc360..59dee762513f 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -11,6 +11,7 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" #include "envoy/http/filter.h" +#include "envoy/http/request_id_extension.h" #include "envoy/router/route_config_provider_manager.h" #include "envoy/tracing/http_tracer_manager.h" @@ -98,6 +99,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::FilterChainFactoryCallbacks& callbacks) override; // Http::ConnectionManagerConfig + Http::RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } const std::list& accessLogs() override { return access_logs_; } Http::ServerConnectionPtr createCodec(Network::Connection& connection, const Buffer::Instance& data, @@ -154,6 +156,10 @@ class HttpConnectionManagerConfig : Logger::Loggable, const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + 
envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return headers_with_underscores_action_; + } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } private: @@ -172,6 +178,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& filter_config); + Http::RequestIDExtensionSharedPtr request_id_extension_; Server::Configuration::FactoryContext& context_; FilterFactoriesList filter_factories_; std::map upgrade_filter_factories_; @@ -215,6 +222,8 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::chrono::milliseconds delayed_close_timeout_; const bool normalize_path_; const bool merge_slashes_; + const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/extensions/filters/network/kafka/codec.h b/source/extensions/filters/network/kafka/codec.h index 698dc24964f7..c23057f09bcd 100644 --- a/source/extensions/filters/network/kafka/codec.h +++ b/source/extensions/filters/network/kafka/codec.h @@ -66,11 +66,8 @@ class AbstractMessageDecoder : public MessageDecoder { * Impl note: similar to redis codec, which also keeps state. */ void onData(Buffer::Instance& data) override { - // Convert buffer to slices and pass them to `doParse`. - uint64_t num_slices = data.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - data.getRawSlices(slices.begin(), num_slices); - for (const Buffer::RawSlice& slice : slices) { + // Pass slices to `doParse`. 
+ for (const Buffer::RawSlice& slice : data.getRawSlices()) { doParse(slice); } } diff --git a/source/extensions/filters/network/kafka/serialization.h b/source/extensions/filters/network/kafka/serialization.h index 7e87c268e81c..8d157172891a 100644 --- a/source/extensions/filters/network/kafka/serialization.h +++ b/source/extensions/filters/network/kafka/serialization.h @@ -35,7 +35,7 @@ template class Deserializer { /** * The type this deserializer is deserializing. */ - typedef T result_type; + using result_type = T; virtual ~Deserializer() = default; diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc index 847ac601a9a7..13ed4139bbb4 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -1,5 +1,7 @@ #include "extensions/filters/network/mongo_proxy/config.h" +#include + #include "envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.h" #include "envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.validate.h" #include "envoy/network/connection.h" @@ -23,8 +25,8 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP const std::string stat_prefix = fmt::format("mongo.{}", proto_config.stat_prefix()); AccessLogSharedPtr access_log; if (!proto_config.access_log().empty()) { - access_log.reset(new AccessLog(proto_config.access_log(), context.accessLogManager(), - context.dispatcher().timeSource())); + access_log = std::make_shared(proto_config.access_log(), context.accessLogManager(), + context.dispatcher().timeSource()); } Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config; diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 85cabdac77d5..1bc12017b3b6 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ 
-21,22 +21,23 @@ RoleBasedAccessControlFilterConfig::RoleBasedAccessControlFilterConfig( enforcement_type_(proto_config.enforcement_type()) {} Network::FilterStatus RoleBasedAccessControlFilter::onData(Buffer::Instance&, bool) { - ENVOY_LOG( - debug, - "checking connection: requestedServerName: {}, remoteAddress: {}, localAddress: {}, ssl: {}, " - "dynamicMetadata: {}", - callbacks_->connection().requestedServerName(), - callbacks_->connection().remoteAddress()->asString(), - callbacks_->connection().localAddress()->asString(), - callbacks_->connection().ssl() - ? "uriSanPeerCertificate: " + - absl::StrJoin(callbacks_->connection().ssl()->uriSanPeerCertificate(), ",") + - ", dnsSanPeerCertificate: " + - absl::StrJoin(callbacks_->connection().ssl()->dnsSansPeerCertificate(), ",") + - ", subjectPeerCertificate: " + - callbacks_->connection().ssl()->subjectPeerCertificate() - : "none", - callbacks_->connection().streamInfo().dynamicMetadata().DebugString()); + ENVOY_LOG(debug, + "checking connection: requestedServerName: {}, sourceIP: {}, directRemoteIP: {}," + "remoteIP: {}, localAddress: {}, ssl: {}, dynamicMetadata: {}", + callbacks_->connection().requestedServerName(), + callbacks_->connection().remoteAddress()->asString(), + callbacks_->connection().streamInfo().downstreamDirectRemoteAddress()->asString(), + callbacks_->connection().streamInfo().downstreamRemoteAddress()->asString(), + callbacks_->connection().streamInfo().downstreamLocalAddress()->asString(), + callbacks_->connection().ssl() + ? "uriSanPeerCertificate: " + + absl::StrJoin(callbacks_->connection().ssl()->uriSanPeerCertificate(), ",") + + ", dnsSanPeerCertificate: " + + absl::StrJoin(callbacks_->connection().ssl()->dnsSansPeerCertificate(), ",") + + ", subjectPeerCertificate: " + + callbacks_->connection().ssl()->subjectPeerCertificate() + : "none", + callbacks_->connection().streamInfo().dynamicMetadata().DebugString()); // When the enforcement type is continuous always do the RBAC checks. 
If it is a one time check, // run the check once and skip it for subsequent onData calls. diff --git a/source/extensions/filters/network/thrift_proxy/filters/filter_config.h b/source/extensions/filters/network/thrift_proxy/filters/filter_config.h index a45764b15252..62d900a8b1d6 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/filter_config.h +++ b/source/extensions/filters/network/thrift_proxy/filters/filter_config.h @@ -20,7 +20,7 @@ namespace ThriftFilters { */ class NamedThriftFilterConfigFactory : public Envoy::Config::TypedFactory { public: - virtual ~NamedThriftFilterConfigFactory() = default; + ~NamedThriftFilterConfigFactory() override = default; /** * Create a particular thrift filter factory implementation. If the implementation is unable to diff --git a/source/extensions/filters/network/thrift_proxy/protocol.h b/source/extensions/filters/network/thrift_proxy/protocol.h index eba0d7efdb7a..3a76799229da 100644 --- a/source/extensions/filters/network/thrift_proxy/protocol.h +++ b/source/extensions/filters/network/thrift_proxy/protocol.h @@ -501,7 +501,7 @@ class DirectResponse { */ class NamedProtocolConfigFactory : public Config::UntypedFactory { public: - virtual ~NamedProtocolConfigFactory() = default; + ~NamedProtocolConfigFactory() override = default; /** * Create a particular Thrift protocol diff --git a/source/extensions/filters/network/thrift_proxy/transport.h b/source/extensions/filters/network/thrift_proxy/transport.h index da1632c5765d..d8393cef6870 100644 --- a/source/extensions/filters/network/thrift_proxy/transport.h +++ b/source/extensions/filters/network/thrift_proxy/transport.h @@ -85,7 +85,7 @@ using TransportPtr = std::unique_ptr; */ class NamedTransportConfigFactory : public Envoy::Config::UntypedFactory { public: - virtual ~NamedTransportConfigFactory() = default; + ~NamedTransportConfigFactory() override = default; /** * Create a particular Thrift transport. 
diff --git a/source/extensions/filters/network/wasm/BUILD b/source/extensions/filters/network/wasm/BUILD index 6841c62bf684..0e17095e8f72 100644 --- a/source/extensions/filters/network/wasm/BUILD +++ b/source/extensions/filters/network/wasm/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -23,10 +24,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", deps = [ ":wasm_filter_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD new file mode 100644 index 000000000000..3020f321940e --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -0,0 +1,45 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "dns_filter_lib", + srcs = ["dns_filter.cc"], + hdrs = ["dns_filter.h"], + deps = [ + "//include/envoy/buffer:buffer_interface", + "//include/envoy/event:file_event_interface", + "//include/envoy/event:timer_interface", + "//include/envoy/network:address_interface", + "//include/envoy/network:filter_interface", + "//include/envoy/network:listener_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:empty_string", + "//source/common/config:config_provider_lib", + "//source/common/network:address_lib", + "//source/common/network:utility_lib", + "//source/common/router:rds_lib", + "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + status = "alpha", + deps = [ + ":dns_filter_lib", + "//include/envoy/registry", + 
"//include/envoy/server:filter_config_interface", + "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/udp/dns_filter/config.cc b/source/extensions/filters/udp/dns_filter/config.cc new file mode 100644 index 000000000000..f5bae1c6ec0e --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/config.cc @@ -0,0 +1,37 @@ +#include "extensions/filters/udp/dns_filter/config.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryFromProto( + const Protobuf::Message& config, Server::Configuration::ListenerFactoryContext& context) { + auto shared_config = std::make_shared( + context, MessageUtil::downcastAndValidate< + const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig&>( + config, context.messageValidationVisitor())); + + return [shared_config](Network::UdpListenerFilterManager& filter_manager, + Network::UdpReadFilterCallbacks& callbacks) -> void { + filter_manager.addReadFilter(std::make_unique(callbacks, shared_config)); + }; +} + +ProtobufTypes::MessagePtr DnsFilterConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +std::string DnsFilterConfigFactory::name() const { return "envoy.filters.udp.dns_filter"; } + +/** + * Static registration for the DNS Filter. @see RegisterFactory. 
+ */ +static Registry::RegisterFactory + register_; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/config.h b/source/extensions/filters/udp/dns_filter/config.h new file mode 100644 index 000000000000..8031f450a092 --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/config.h @@ -0,0 +1,31 @@ +#pragma once + +#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" +#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.validate.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +/** + * Config registration for the UDP proxy filter. @see NamedUdpListenerFilterConfigFactory. + */ +class DnsFilterConfigFactory : public Server::Configuration::NamedUdpListenerFilterConfigFactory { +public: + // NamedUdpListenerFilterConfigFactory + Network::UdpListenerFilterFactoryCb + createFilterFactoryFromProto(const Protobuf::Message& config, + Server::Configuration::ListenerFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + std::string name() const override; +}; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc new file mode 100644 index 000000000000..f2eeaaada0cb --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -0,0 +1,57 @@ +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +#include "envoy/network/listener.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( + Server::Configuration::ListenerFactoryContext& context, + const 
envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig& config) + : root_scope_(context.scope()), stats_(generateStats(config.stat_prefix(), root_scope_)) { + + using envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig; + + // store configured data for server context + const auto& server_config = config.server_config(); + + if (server_config.has_inline_dns_table()) { + + const auto& cfg = server_config.inline_dns_table(); + const size_t entries = cfg.virtual_domains().size(); + + // TODO (abaptiste): Check that the domain configured here appears + // in the known domains list + virtual_domains_.reserve(entries); + for (const auto& virtual_domain : cfg.virtual_domains()) { + DnsAddressList addresses{}; + + if (virtual_domain.endpoint().has_address_list()) { + const auto& address_list = virtual_domain.endpoint().address_list().address(); + addresses.reserve(address_list.size()); + for (const auto& configured_address : address_list) { + addresses.push_back(configured_address); + } + } + + virtual_domains_.emplace(virtual_domain.name(), std::move(addresses)); + } + } +} + +void DnsFilter::onData(Network::UdpRecvData& client_request) { + // Handle incoming request and respond with an answer + UNREFERENCED_PARAMETER(client_request); +} + +void DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { + // Increment error stats + UNREFERENCED_PARAMETER(error_code); +} + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h new file mode 100644 index 000000000000..f62d0c8162ba --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -0,0 +1,78 @@ +#pragma once + +#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" +#include "envoy/network/filter.h" + +#include "common/buffer/buffer_impl.h" +#include "common/config/config_provider_impl.h" 
+#include "common/network/utility.h" +#include "common/runtime/runtime_impl.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +/** + * All Dns Filter stats. @see stats_macros.h + * Track the number of answered and un-answered queries for A and AAAA records + */ +#define ALL_DNS_FILTER_STATS(COUNTER) \ + COUNTER(queries_a_record) \ + COUNTER(noanswers_a_record) \ + COUNTER(answers_a_record) \ + COUNTER(queries_aaaa_record) \ + COUNTER(noanswers_aaaa_record) \ + COUNTER(answers_aaaa_record) + +/** + * Struct definition for all Dns Filter stats. @see stats_macros.h + */ +struct DnsFilterStats { + ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT) +}; + +using DnsAddressList = std::vector; +using DnsVirtualDomainConfig = absl::flat_hash_map; + +class DnsFilterEnvoyConfig { +public: + DnsFilterEnvoyConfig( + Server::Configuration::ListenerFactoryContext& context, + const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig& config); + + DnsFilterStats& stats() const { return stats_; } + DnsVirtualDomainConfig& domains() const { return virtual_domains_; } + +private: + static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) { + const auto final_prefix = absl::StrCat("dns_filter.", stat_prefix); + return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; + } + + Stats::Scope& root_scope_; + mutable DnsFilterStats stats_; + mutable DnsVirtualDomainConfig virtual_domains_; +}; + +using DnsFilterEnvoyConfigSharedPtr = std::shared_ptr; + +class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable { +public: + DnsFilter(Network::UdpReadFilterCallbacks& callbacks, const DnsFilterEnvoyConfigSharedPtr& config) + : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()) {} + + // Network::UdpListenerReadFilter callbacks + void onData(Network::UdpRecvData& client_request) override; + void onReceiveError(Api::IoError::IoErrorCode 
error_code) override; + +private: + const DnsFilterEnvoyConfigSharedPtr config_; + Network::UdpListener& listener_; + Runtime::RandomGeneratorImpl rng_; +}; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index 049125ba4054..d96eda529995 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -113,7 +113,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, public: ActiveSession(ClusterInfo& parent, Network::UdpRecvData::LocalPeerAddresses&& addresses, const Upstream::HostConstSharedPtr& host); - ~ActiveSession(); + ~ActiveSession() override; const Network::UdpRecvData::LocalPeerAddresses& addresses() const { return addresses_; } const Upstream::Host& host() const { return *host_; } void write(const Buffer::Instance& buffer); diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index b9407cba2626..954441465cc5 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -69,6 +69,7 @@ ActiveQuicListener::~ActiveQuicListener() { onListenerShutdown(); } void ActiveQuicListener::onListenerShutdown() { ENVOY_LOG(info, "Quic listener {} shutdown.", config_.name()); quic_dispatcher_->Shutdown(); + udp_listener_.reset(); } void ActiveQuicListener::onData(Network::UdpRecvData& data) { @@ -81,13 +82,11 @@ void ActiveQuicListener::onData(Network::UdpRecvData& data) { quic::QuicTime::Delta::FromMicroseconds(std::chrono::duration_cast( data.receive_time_.time_since_epoch()) .count()); - uint64_t num_slice = data.buffer_->getRawSlices(nullptr, 0); - ASSERT(num_slice == 1); - Buffer::RawSlice slice; - 
data.buffer_->getRawSlices(&slice, 1); + ASSERT(data.buffer_->getRawSlices().size() == 1); + Buffer::RawSliceVector slices = data.buffer_->getRawSlices(/*max_slices=*/1); // TODO(danzh): pass in TTL and UDP header. - quic::QuicReceivedPacket packet(reinterpret_cast(slice.mem_), slice.len_, timestamp, - /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false, + quic::QuicReceivedPacket packet(reinterpret_cast(slices[0].mem_), slices[0].len_, + timestamp, /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false, /*packet_headers=*/nullptr, /*headers_length=*/0, /*owns_header_buffer*/ false); quic_dispatcher_->ProcessPacket(self_address, peer_address, packet); @@ -101,6 +100,16 @@ void ActiveQuicListener::onWriteReady(const Network::Socket& /*socket*/) { quic_dispatcher_->OnCanWrite(); } +void ActiveQuicListener::pauseListening() { quic_dispatcher_->StopAcceptingNewConnections(); } + +void ActiveQuicListener::resumeListening() { quic_dispatcher_->StartAcceptingNewConnections(); } + +void ActiveQuicListener::shutdownListener() { + // Same as pauseListening() because all we want is to stop accepting new + // connections. + quic_dispatcher_->StopAcceptingNewConnections(); +} + ActiveQuicListenerFactory::ActiveQuicListenerFactory( const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency) : concurrency_(concurrency) { diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index 09d314abe139..6536731c199f 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -34,7 +34,6 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, ~ActiveQuicListener() override; - // TODO(#7465): Make this a callback. 
void onListenerShutdown(); // Network::UdpListenerCallbacks @@ -47,7 +46,9 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, // ActiveListenerImplBase Network::Listener* listener() override { return udp_listener_.get(); } - void destroy() override { udp_listener_.reset(); } + void pauseListening() override; + void resumeListening() override; + void shutdownListener() override; private: friend class ActiveQuicListenerPeer; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h index ffe4d020e09a..ec56eafa6fff 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.h @@ -21,7 +21,7 @@ class EnvoyQuicAlarm : public quic::QuicAlarm { quic::QuicArenaScopedPtr delegate); // TimerImpl destruction deletes in-flight alarm firing event. - ~EnvoyQuicAlarm() override {} + ~EnvoyQuicAlarm() override = default; // quic::QuicAlarm void CancelImpl() override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc index c287c8e3eb95..0ae38b38dbb2 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc @@ -58,12 +58,10 @@ void EnvoyQuicClientConnection::processPacket( quic::QuicTime::Delta::FromMicroseconds( std::chrono::duration_cast(receive_time.time_since_epoch()) .count()); - uint64_t num_slice = buffer->getRawSlices(nullptr, 0); - ASSERT(num_slice == 1); - Buffer::RawSlice slice; - buffer->getRawSlices(&slice, 1); - quic::QuicReceivedPacket packet(reinterpret_cast(slice.mem_), slice.len_, timestamp, - /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false, + ASSERT(buffer->getRawSlices().size() == 1); + Buffer::RawSliceVector slices = buffer->getRawSlices(/*max_slices=*/1); + quic::QuicReceivedPacket 
packet(reinterpret_cast(slices[0].mem_), slices[0].len_, + timestamp, /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false, /*packet_headers=*/nullptr, /*headers_length=*/0, /*owns_header_buffer*/ false); ProcessUdpPacket(envoyAddressInstanceToQuicSocketAddress(local_address), diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h index cabb3197c1cb..8884c63dac99 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h @@ -29,6 +29,9 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, // Http::StreamEncoder void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override; + Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { + return absl::nullopt; + } // Http::RequestEncoder void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h index c8355717bccb..49abe56e9122 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h @@ -49,7 +49,7 @@ class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { std::unique_ptr* /*details*/, std::unique_ptr /*callback*/) override { // Cert SCT support is not enabled for fake ProofSource. 
- if (cert_sct == "" && certs.size() == 1 && certs[0] == "Fake cert") { + if (cert_sct.empty() && certs.size() == 1 && certs[0] == "Fake cert") { return quic::QUIC_SUCCESS; } return quic::QUIC_FAILURE; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h index 88ce264d0246..59c03e79509a 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h @@ -33,6 +33,9 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(const Http::ResponseTrailerMap& trailers) override; void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override; + Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { + return absl::nullopt; + } // Http::Stream void resetStream(Http::StreamResetReason reason) override; diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 85222133b6e7..2043554da438 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -13,7 +13,7 @@ #if defined(QUICHE_FLAG) -QUICHE_FLAG(bool, http2_reloadable_flag_http2_add_backend_ping_manager, false, +QUICHE_FLAG(bool, http2_reloadable_flag_http2_add_backend_ping_manager, true, "If true, SpdyBackendDispatcher will instantiate and use a PeriodicPingManager for " "handling PING logic.") @@ -21,11 +21,6 @@ QUICHE_FLAG( bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false, "If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.") -QUICHE_FLAG(bool, http2_reloadable_flag_http2_refactor_client_ping_rtt, true, - "If true, logic for coordinating RTT PINGs moves from " - 
"SpdyClientDispatcher::PingManager into SpdyClientDispatcher proper. No expected " - "functional change, but protected out of an abundance of caution.") - QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false, "If true, check whether client meets security requirements during SSL handshake. If " "flag is true and client does not meet security requirements, do not negotiate HTTP/2 " @@ -33,15 +28,15 @@ QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, "already negotiated. The spec contains both cipher and TLS version requirements.") QUICHE_FLAG( - bool, http2_reloadable_flag_http2_skip_querying_entry_buffer_error, false, + bool, http2_reloadable_flag_http2_skip_querying_entry_buffer_error, true, "If true, do not query entry_buffer_.error_detected() in HpackDecoder::error_detected().") QUICHE_FLAG( - bool, http2_reloadable_flag_http2_support_periodic_ping_manager_cbs, false, + bool, http2_reloadable_flag_http2_support_periodic_ping_manager_cbs, true, "If true, PeriodicPingManager will invoke user-provided callbacks on receiving PING acks.") QUICHE_FLAG( - bool, http2_reloadable_flag_http2_use_settings_rtt_in_ping_manager, false, + bool, http2_reloadable_flag_http2_use_settings_rtt_in_ping_manager, true, "If true along with --gfe2_reloadable_flag_http2_add_backend_ping_manager, SpdyDispatcher will " "bootstrap its PingManager RTT with the RTT determined from the initial SETTINGS<-->ack.") @@ -64,22 +59,18 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, "Support different QUIC sessions, as indicated by ALPN. 
Used for QBONE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_arm_pto_with_earliest_sent_time, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_arm_pto_with_earliest_sent_time, true, "If true, arm the 1st PTO with earliest in flight sent time.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_overestimate_bandwidth_with_aggregation, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_overestimate_bandwidth_with_aggregation, true, "If true, fix QUIC bandwidth sampler to avoid over estimating bandwidth in the " "presence of ack aggregation.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_batch_writer_flush_after_mtu_probe, true, - "If true and batch writer is used, QuicConnection will flush after a mtu probe is sent.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, false, "If true, QUIC BBRv2 to take ack height into account when calculating " "queuing_threshold in PROBE_UP.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_unnecessary_probe_rtt, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_unnecessary_probe_rtt, true, "If true, QUIC BBRv2 to avoid unnecessary PROBE_RTTs after quiescence.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, @@ -88,6 +79,10 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_fix_pacing_rate, true, "If true, re-calculate pacing rate when cwnd gets bootstrapped.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_bbr_fix_zero_bw_on_loss_only_event, false, + "If true, fix a bug in QUIC BBR where bandwidth estimate becomes 0 after a loss only event.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, "When true and the BBR9 connection option is present, BBR only considers bandwidth " "samples app-limited if they're not filling the pipe.") @@ -108,16 +103,12 @@ QUICHE_FLAG(bool, 
quic_reloadable_flag_quic_bbr_startup_rate_reduction, false, "When true, enables the BBS4 and BBS5 connection options, which reduce BBR's pacing " "rate in STARTUP as more losses occur as a fraction of CWND.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_better_mtu_packet_check, true, - "If true, QuicConnection will check MTU_DISCOVERY_FRAME in nonretransmittable_frames " - "to see if a packet is a MTU probe.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bundle_retransmittable_with_pto_ack, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bundle_retransmittable_with_pto_ack, true, "When the EACK connection option is sent by the client, an ack-eliciting frame is " "bundled with ACKs sent after the PTO fires.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_check_handshake_timeout_before_idle_timeout, true, - "If true, QuicConnection will check handshake timeout before idle timeout.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bw_sampler_app_limited_starting_value, false, + "If true, quic::BandwidthSampler will start in application limited phase.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, "If true, set burst token to 2 in cwnd bootstrapping experiment.") @@ -125,6 +116,10 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_create_incoming_stream_bug, false, + "If true, trigger QUIC_BUG in two ShouldCreateIncomingStream() overrides when called " + "with locally initiated stream ID.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false, "If true, consider getting QoS after stream has been detached as GFE bug.") @@ -154,9 +149,6 @@ QUICHE_FLAG( bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, "In v44 and above, where STOP_WAITING is never sent, close the 
connection if it's received.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_dispatch_data_when_disconnected, true, - "If true, do not dispatch data if connection disconnected.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") @@ -172,7 +164,7 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_27, false, "If true, enable QUIC version h3-27.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t050, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t050, true, "If true, enable QUIC version T050.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") @@ -180,28 +172,24 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, "If true, adjust congestion window when doing bandwidth resumption in BBR.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_one_write_error_after_mtu_probe, false, + "If true, QUIC connection will ignore one packet write error after MTU probe.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, "If true, QuicListener::OnSocketIsWritable will always return false, which means there " "will never be a fake EPOLLOUT event in the next epoll iteration.") QUICHE_FLAG( - bool, quic_reloadable_flag_quic_minimum_validation_of_coalesced_packets, false, + bool, quic_reloadable_flag_quic_minimum_validation_of_coalesced_packets, true, "If true, only do minimum validation of coalesced packets (only validate connection ID).") QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, false, "If true, will negotiate the ACK delay time.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_neuter_unencrypted_control_frames, true, - "If true, neuter unencrypted control 
frames in QuicUnackedPacketMap::NeuterUnencryptedPackets.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, "If true, transport connection stats doesn't report duplicated experiments for same " "connection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_window_update_if_disconnected, true, - "If true, do not send WINDOW_UPDATE if connection has been disconnected.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_populate_mean_rtt_deviation_in_tcs, true, "If true, populate mean rtt deviation in transport connection stats.") @@ -211,7 +199,7 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false, "If true, for L1 GFE, as requests come in, record frontend service to VIP mapping " - "which is used to announce VIP in SHLO for proxied sessions. ") + "which is used to announce VIP in SHLO for proxied sessions.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") @@ -219,6 +207,9 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, fals "If true, require handshake confirmation for QUIC connections, functionally disabling " "0-rtt handshakes.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_ping_when_pto_skips_packet_number, false, + "If true, send PING when PTO skips packet number and there is no data to send.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps in the QUIC ACK " "frame are sent and processed.") @@ -226,10 +217,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, "If true, enable server push feature on QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_set_send_algorithm_noop_if_cc_type_unchanged, true, - "If true, QuicSentPacketManager::SetSendAlgorithm(CongestionControlType) will become a " - 
"no-op if the current and the requested cc_type are the same.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_skip_packet_threshold_loss_detection_with_runt, false, "If true, skip packet threshold loss detection if largest acked is a runt.") @@ -264,14 +251,17 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_pigeon_sockets, false, "Use USPS Direct Path for QUIC egress.") QUICHE_FLAG( - bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp2, false, + bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp2, true, "If true, use QuicClock::Now() as the source of packet receive time instead of WallNow().") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_standard_deviation_for_pto, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_standard_deviation_for_pto, true, "If true, use standard deviation when calculating PTO timeout.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_write_with_transmission, false, + "If true, QuicSession's various write methods will set transmission type.") + QUICHE_FLAG( - bool, quic_reloadable_flag_quic_writevdata_at_level, false, + bool, quic_reloadable_flag_quic_writevdata_at_level, true, "If true, QuicSession::WritevData() will support writing data at a specified encryption level.") QUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false, @@ -303,6 +293,10 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, QUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false, "If true, use TPACKET_V3 for QuicRxRing instead of TPACKET_V2.") +QUICHE_FLAG(bool, quic_restart_flag_quic_send_settings_on_write_key_available, false, + "If true, send H3 SETTINGs when 1-RTT write key is available (rather then both keys " + "are available).") + QUICHE_FLAG(bool, quic_restart_flag_quic_should_accept_new_connection, false, "If true, reject QUIC CHLO packets when dispatcher is asked to do so.") @@ -320,10 +314,6 @@ QUICHE_FLAG(bool, 
quic_restart_flag_quic_use_pigeon_socket_to_backend, false, "If true, create a shared pigeon socket for all quic to backend connections and switch " "to use it after successful handshake.") -QUICHE_FLAG(bool, quic_restart_flag_quic_use_rxring_when_explicitly_specified, true, - "If true, GFE will use QuicRxRing for ingress only when ingress_method is explicitly " - "set to RXRING_INGRESS in quic_config.") - QUICHE_FLAG( bool, spdy_reloadable_flag_spdy_discard_response_body_if_disallowed, false, "If true, SPDY will discard all response body bytes when response code indicates no response " @@ -407,11 +397,11 @@ QUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_max_rand_duration_ms, 1000, QUICHE_FLAG(int32_t, quic_bbr2_default_probe_rtt_period_ms, 10000, "The default period for entering PROBE_RTT, in milliseconds.") -QUICHE_FLAG(double, quic_bbr2_default_loss_threshold, 0.3, +QUICHE_FLAG(double, quic_bbr2_default_loss_threshold, 0.02, "The default loss threshold for QUIC BBRv2, should be a value " "between 0 and 1.") -QUICHE_FLAG(int32_t, quic_bbr2_default_startup_full_loss_count, 2, +QUICHE_FLAG(int32_t, quic_bbr2_default_startup_full_loss_count, 8, "The default minimum number of loss marking events to exit STARTUP.") QUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_full_loss_count, 2, diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h index c61e40f8414a..b959d7f18b87 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.h @@ -146,7 +146,7 @@ class QuicLogEmitter { class NullLogStream : public std::ostream { public: - NullLogStream() : std::ostream(NULL) {} + NullLogStream() : std::ostream(nullptr) {} NullLogStream& stream() { return *this; } }; diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc 
b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc index 8f71e7004066..c75f99c0bafb 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_impl.cc @@ -25,21 +25,19 @@ QuicMemSliceImpl::QuicMemSliceImpl(QuicUniqueBufferPtr buffer, size_t length) QuicMemSliceImpl::QuicMemSliceImpl(Envoy::Buffer::Instance& buffer, size_t length) { ASSERT(firstSliceLength(buffer) == length); single_slice_buffer_.move(buffer, length); - ASSERT(single_slice_buffer_.getRawSlices(nullptr, 0) == 1); + ASSERT(single_slice_buffer_.getRawSlices().size() == 1); } const char* QuicMemSliceImpl::data() const { - Envoy::Buffer::RawSlice out; - uint64_t num_slices = single_slice_buffer_.getRawSlices(&out, 1); - ASSERT(num_slices <= 1); - return static_cast(out.mem_); + Envoy::Buffer::RawSliceVector slices = single_slice_buffer_.getRawSlices(/*max_slices=*/1); + ASSERT(slices.size() <= 1); + return !slices.empty() ? 
static_cast(slices[0].mem_) : nullptr; } size_t QuicMemSliceImpl::firstSliceLength(Envoy::Buffer::Instance& buffer) { - Envoy::Buffer::RawSlice slice; - uint64_t total_num = buffer.getRawSlices(&slice, 1); - ASSERT(total_num != 0); - return slice.len_; + Envoy::Buffer::RawSliceVector slices = buffer.getRawSlices(/*max_slices=*/1); + ASSERT(slices.size() == 1); + return slices[0].len_; } } // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.cc index 40392e29cd99..c2eb527d6584 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.cc @@ -11,10 +11,8 @@ namespace quic { quiche::QuicheStringPiece QuicMemSliceSpanImpl::GetData(size_t index) { - uint64_t num_slices = buffer_->getRawSlices(nullptr, 0); - ASSERT(num_slices > index); - absl::FixedArray slices(num_slices); - buffer_->getRawSlices(slices.begin(), num_slices); + Envoy::Buffer::RawSliceVector slices = buffer_->getRawSlices(/*max_slices=*/index + 1); + ASSERT(slices.size() > index); return {reinterpret_cast(slices[index].mem_), slices[index].len_}; } diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h index dd9e2dde1c5d..60917fcd0d7c 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h @@ -45,7 +45,7 @@ class QuicMemSliceSpanImpl { // QuicMemSliceSpan quiche::QuicheStringPiece GetData(size_t index); QuicByteCount total_length() { return buffer_->length(); }; - size_t NumSlices() { return buffer_->getRawSlices(nullptr, 0); } + size_t NumSlices() { return buffer_->getRawSlices().size(); } template QuicByteCount ConsumeAll(ConsumeFunction 
consume); bool empty() const { return buffer_->length() == 0; } @@ -55,11 +55,8 @@ class QuicMemSliceSpanImpl { template QuicByteCount QuicMemSliceSpanImpl::ConsumeAll(ConsumeFunction consume) { - uint64_t num_slices = buffer_->getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer_->getRawSlices(slices.begin(), num_slices); size_t saved_length = 0; - for (auto& slice : slices) { + for (auto& slice : buffer_->getRawSlices()) { if (slice.len_ == 0) { continue; } diff --git a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h index 1afcd7021a91..54c8e87a259d 100644 --- a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h +++ b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h @@ -87,10 +87,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase { void rawWrite(Buffer::Instance& data, bool end_stream) override; // Network::ReadBufferSource - Network::StreamBuffer getReadBuffer() override { - // Network filter has to stop iteration to prevent hitting this line. - NOT_REACHED_GCOVR_EXCL_LINE; - } + Network::StreamBuffer getReadBuffer() override { return {empty_buffer_, false}; } // Network::WriteBufferSource Network::StreamBuffer getWriteBuffer() override { NOT_REACHED_GCOVR_EXCL_LINE; } @@ -134,6 +131,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase { // stream write. QUICHE doesn't buffer data in connection, all the data is buffered in stream's // send buffer. 
EnvoyQuicSimulatedWatermarkBuffer write_buffer_watermark_simulation_; + Buffer::OwnedImpl empty_buffer_; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index 364f30f8a06e..6a468bf867be 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -1,3 +1,5 @@ +#include + #include "envoy/network/io_handle.h" #include "common/network/io_socket_error_impl.h" @@ -45,11 +47,22 @@ class QuicIoHandleWrapper : public Network::IoHandle { Api::IoCallUint64Result recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice, uint32_t self_port, RecvMsgOutput& output) override { if (closed_) { + ASSERT(false, "recvmmsg is called after close."); return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF), Network::IoSocketError::deleteIoError)); } return io_handle_.recvmsg(slices, num_slice, self_port, output); } + Api::IoCallUint64Result recvmmsg(RawSliceArrays& slices, uint32_t self_port, + RecvMsgOutput& output) override { + if (closed_) { + ASSERT(false, "recvmmsg is called after close."); + return Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EBADF), + Network::IoSocketError::deleteIoError)); + } + return io_handle_.recvmmsg(slices, self_port, output); + } + bool supportsMmsg() const override { return io_handle_.supportsMmsg(); } private: Network::IoHandle& io_handle_; diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index 390913105c1e..e04fa22ae0d1 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -299,10 +299,8 @@ Http::Code HystrixSink::handlerHystrixEventStream(absl::string_view, admin_stream.getDecoderFilterCallbacks(); // Disable chunk-encoding in HTTP/1.x. 
- // TODO: This request should be propagated to codecs via API, instead of using a pseudo-header. - // See: https://github.com/envoyproxy/envoy/issues/9749 if (stream_decoder_filter_callbacks.streamInfo().protocol() < Http::Protocol::Http2) { - response_headers.setNoChunks(0); + admin_stream.http1StreamEncoderOptions().value().get().disableChunkEncoding(); } registerConnection(&stream_decoder_filter_callbacks); @@ -380,7 +378,7 @@ void HystrixSink::flush(Stats::MetricSnapshot& snapshot) { *cluster_stats_cache_ptr, cluster_info->name(), cluster_info->resourceManager(Upstream::ResourcePriority::Default).pendingRequests().max(), cluster_info->statsScope() - .gaugeFromStatName(membership_total_, Stats::Gauge::ImportMode::Accumulate) + .gaugeFromStatName(membership_total_, Stats::Gauge::ImportMode::NeverImport) .value(), server_.statsFlushInterval(), time_histograms[cluster_info->name()], ss); } diff --git a/source/extensions/stat_sinks/hystrix/hystrix.h b/source/extensions/stat_sinks/hystrix/hystrix.h index d8e2902bd362..e0daa8caae74 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.h +++ b/source/extensions/stat_sinks/hystrix/hystrix.h @@ -158,6 +158,8 @@ class HystrixSink : public Stats::Sink, public Logger::Loggable cluster_stats_cache_map_; // Saved StatNames for fast comparisons in loop. + // TODO(mattklein123): Many/all of these stats should just be pulled directly from the cluster + // stats directly. This needs some cleanup. 
Stats::StatNamePool stat_name_pool_; const Stats::StatName cluster_name_; const Stats::StatName cluster_upstream_rq_time_; diff --git a/source/extensions/tracers/common/factory_base.h b/source/extensions/tracers/common/factory_base.h index b3eec4254285..03e45d761559 100644 --- a/source/extensions/tracers/common/factory_base.h +++ b/source/extensions/tracers/common/factory_base.h @@ -14,7 +14,7 @@ namespace Common { template class FactoryBase : public Server::Configuration::TracerFactory { public: // Server::Configuration::TracerFactory - Tracing::HttpTracerPtr + Tracing::HttpTracerSharedPtr createHttpTracer(const Protobuf::Message& config, Server::Configuration::TracerFactoryContext& context) override { return createHttpTracerTyped(MessageUtil::downcastAndValidate( @@ -32,7 +32,7 @@ template class FactoryBase : public Server::Configuration::T FactoryBase(const std::string& name) : name_(name) {} private: - virtual Tracing::HttpTracerPtr + virtual Tracing::HttpTracerSharedPtr createHttpTracerTyped(const ConfigProto& proto_config, Server::Configuration::TracerFactoryContext& context) PURE; diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD index 840ab08c00ad..ea38e6dfc778 100644 --- a/source/extensions/tracers/datadog/BUILD +++ b/source/extensions/tracers/datadog/BUILD @@ -22,7 +22,9 @@ envoy_cc_library( external_deps = ["dd_opentracing_cpp"], deps = [ "//source/common/config:utility_lib", + "//source/common/http:async_client_utility_lib", "//source/common/tracing:http_tracer_lib", + "//source/common/upstream:cluster_update_tracker_lib", "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common/ot:opentracing_driver_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/source/extensions/tracers/datadog/config.cc b/source/extensions/tracers/datadog/config.cc index e551cf4f4ffd..5b8908ed3ccf 100644 --- a/source/extensions/tracers/datadog/config.cc +++ 
b/source/extensions/tracers/datadog/config.cc @@ -19,14 +19,14 @@ namespace Datadog { DatadogTracerFactory::DatadogTracerFactory() : FactoryBase(TracerNames::get().Datadog) {} -Tracing::HttpTracerPtr DatadogTracerFactory::createHttpTracerTyped( +Tracing::HttpTracerSharedPtr DatadogTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::DatadogConfig& proto_config, Server::Configuration::TracerFactoryContext& context) { Tracing::DriverPtr datadog_driver = std::make_unique( proto_config, context.serverFactoryContext().clusterManager(), context.serverFactoryContext().scope(), context.serverFactoryContext().threadLocal(), context.serverFactoryContext().runtime()); - return std::make_unique(std::move(datadog_driver), + return std::make_shared(std::move(datadog_driver), context.serverFactoryContext().localInfo()); } diff --git a/source/extensions/tracers/datadog/config.h b/source/extensions/tracers/datadog/config.h index df3abb3dd73a..2453546405b0 100644 --- a/source/extensions/tracers/datadog/config.h +++ b/source/extensions/tracers/datadog/config.h @@ -21,7 +21,7 @@ class DatadogTracerFactory : public Common::FactoryBaseinfo(); + Config::Utility::checkCluster(TracerNames::get().Datadog, datadog_config.collector_cluster(), cm_, + /* allow_added_via_api */ true); + cluster_ = datadog_config.collector_cluster(); // Default tracer options. 
tracer_options_.operation_name_override = "envoy.proxy"; @@ -60,7 +60,8 @@ opentracing::Tracer& Driver::tracer() { return *tls_->getTyped().trac TraceReporter::TraceReporter(TraceEncoderSharedPtr encoder, Driver& driver, Event::Dispatcher& dispatcher) - : driver_(driver), encoder_(encoder) { + : driver_(driver), encoder_(encoder), + collector_cluster_(driver_.clusterManager(), driver_.cluster()) { flush_timer_ = dispatcher.createTimer([this]() -> void { for (auto& h : encoder_->headers()) { lower_case_headers_.emplace(h.first, Http::LowerCaseString{h.first}); @@ -89,7 +90,7 @@ void TraceReporter::flushTraces() { Http::RequestMessagePtr message(new Http::RequestMessageImpl()); message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); message->headers().setReferencePath(encoder_->path()); - message->headers().setReferenceHost(driver_.cluster()->name()); + message->headers().setReferenceHost(driver_.cluster()); for (auto& h : encoder_->headers()) { message->headers().setReferenceKey(lower_case_headers_.at(h.first), h.second); } @@ -100,21 +101,35 @@ void TraceReporter::flushTraces() { ENVOY_LOG(debug, "submitting {} trace(s) to {} with payload size {}", pendingTraces, encoder_->path(), encoder_->payload().size()); - driver_.clusterManager() - .httpAsyncClientForCluster(driver_.cluster()->name()) - .send(std::move(message), *this, - Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(1000U))); + if (collector_cluster_.exists()) { + Http::AsyncClient::Request* request = + driver_.clusterManager() + .httpAsyncClientForCluster(collector_cluster_.info()->name()) + .send( + std::move(message), *this, + Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(1000U))); + if (request) { + active_requests_.add(*request); + } + } else { + ENVOY_LOG(debug, "collector cluster '{}' does not exist", driver_.cluster()); + driver_.tracerStats().reports_skipped_no_cluster_.inc(); + } encoder_->clearTraces(); } } -void 
TraceReporter::onFailure(Http::AsyncClient::FailureReason) { +void TraceReporter::onFailure(const Http::AsyncClient::Request& request, + Http::AsyncClient::FailureReason) { + active_requests_.remove(request); ENVOY_LOG(debug, "failure submitting traces to datadog agent"); driver_.tracerStats().reports_failed_.inc(); } -void TraceReporter::onSuccess(Http::ResponseMessagePtr&& http_response) { +void TraceReporter::onSuccess(const Http::AsyncClient::Request& request, + Http::ResponseMessagePtr&& http_response) { + active_requests_.remove(request); uint64_t responseStatus = Http::Utility::getResponseStatus(http_response->headers()); if (responseStatus != enumToInt(Http::Code::OK)) { // TODO: Consider adding retries for failed submissions. diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.h b/source/extensions/tracers/datadog/datadog_tracer_impl.h index d48a8e0f13e7..774e34665a85 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.h +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.h @@ -9,8 +9,10 @@ #include "envoy/tracing/http_tracer.h" #include "envoy/upstream/cluster_manager.h" +#include "common/http/async_client_utility.h" #include "common/http/header_map_impl.h" #include "common/json/json_loader.h" +#include "common/upstream/cluster_update_tracker.h" #include "extensions/tracers/common/ot/opentracing_driver_impl.h" @@ -22,6 +24,7 @@ namespace Datadog { #define DATADOG_TRACER_STATS(COUNTER) \ COUNTER(traces_sent) \ COUNTER(timer_flushed) \ + COUNTER(reports_skipped_no_cluster) \ COUNTER(reports_sent) \ COUNTER(reports_dropped) \ COUNTER(reports_failed) @@ -48,7 +51,7 @@ class Driver : public Common::Ot::OpenTracingDriver { // Getters to return the DatadogDriver's key members. 
Upstream::ClusterManager& clusterManager() { return cm_; } - Upstream::ClusterInfoConstSharedPtr cluster() { return cluster_; } + const std::string& cluster() { return cluster_; } Runtime::Loader& runtime() { return runtime_; } DatadogTracerStats& tracerStats() { return tracer_stats_; } const datadog::opentracing::TracerOptions& tracerOptions() { return tracer_options_; } @@ -73,7 +76,7 @@ class Driver : public Common::Ot::OpenTracingDriver { }; Upstream::ClusterManager& cm_; - Upstream::ClusterInfoConstSharedPtr cluster_; + std::string cluster_; DatadogTracerStats tracer_stats_; datadog::opentracing::TracerOptions tracer_options_; ThreadLocal::SlotPtr tls_; @@ -107,8 +110,8 @@ class TraceReporter : public Http::AsyncClient::Callbacks, TraceReporter(TraceEncoderSharedPtr encoder, Driver& driver, Event::Dispatcher& dispatcher); // Http::AsyncClient::Callbacks. - void onSuccess(Http::ResponseMessagePtr&&) override; - void onFailure(Http::AsyncClient::FailureReason) override; + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override; + void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override; private: /** @@ -127,6 +130,10 @@ class TraceReporter : public Http::AsyncClient::Callbacks, TraceEncoderSharedPtr encoder_; std::map lower_case_headers_; + + Upstream::ClusterUpdateTracker collector_cluster_; + // Track active HTTP requests to be able to cancel them on destruction. 
+ Http::AsyncClientRequestTracker active_requests_; }; } // namespace Datadog } // namespace Tracers diff --git a/source/extensions/tracers/dynamic_ot/config.cc b/source/extensions/tracers/dynamic_ot/config.cc index 086561ad8345..54736ec04107 100644 --- a/source/extensions/tracers/dynamic_ot/config.cc +++ b/source/extensions/tracers/dynamic_ot/config.cc @@ -18,14 +18,14 @@ namespace DynamicOt { DynamicOpenTracingTracerFactory::DynamicOpenTracingTracerFactory() : FactoryBase(TracerNames::get().DynamicOt) {} -Tracing::HttpTracerPtr DynamicOpenTracingTracerFactory::createHttpTracerTyped( +Tracing::HttpTracerSharedPtr DynamicOpenTracingTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::DynamicOtConfig& proto_config, Server::Configuration::TracerFactoryContext& context) { const std::string& library = proto_config.library(); const std::string config = MessageUtil::getJsonStringFromMessage(proto_config.config()); Tracing::DriverPtr dynamic_driver = std::make_unique( context.serverFactoryContext().scope(), library, config); - return std::make_unique(std::move(dynamic_driver), + return std::make_shared(std::move(dynamic_driver), context.serverFactoryContext().localInfo()); } diff --git a/source/extensions/tracers/dynamic_ot/config.h b/source/extensions/tracers/dynamic_ot/config.h index d64a42a5f4f3..58fff56e4243 100644 --- a/source/extensions/tracers/dynamic_ot/config.h +++ b/source/extensions/tracers/dynamic_ot/config.h @@ -20,7 +20,7 @@ class DynamicOpenTracingTracerFactory private: // FactoryBase - Tracing::HttpTracerPtr + Tracing::HttpTracerSharedPtr createHttpTracerTyped(const envoy::config::trace::v3::DynamicOtConfig& configuration, Server::Configuration::TracerFactoryContext& context) override; }; diff --git a/source/extensions/tracers/lightstep/BUILD b/source/extensions/tracers/lightstep/BUILD index a92a3a6f5ddd..a72d39b37376 100644 --- a/source/extensions/tracers/lightstep/BUILD +++ b/source/extensions/tracers/lightstep/BUILD @@ -25,6 +25,7 @@ 
envoy_cc_library( "//source/common/grpc:context_lib", "//source/common/stats:symbol_table_lib", "//source/common/tracing:http_tracer_lib", + "//source/common/upstream:cluster_update_tracker_lib", "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common/ot:opentracing_driver_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/source/extensions/tracers/lightstep/config.cc b/source/extensions/tracers/lightstep/config.cc index 989000fd1da7..52ecaec58c33 100644 --- a/source/extensions/tracers/lightstep/config.cc +++ b/source/extensions/tracers/lightstep/config.cc @@ -19,7 +19,7 @@ namespace Lightstep { LightstepTracerFactory::LightstepTracerFactory() : FactoryBase(TracerNames::get().Lightstep) {} -Tracing::HttpTracerPtr LightstepTracerFactory::createHttpTracerTyped( +Tracing::HttpTracerSharedPtr LightstepTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::LightstepConfig& proto_config, Server::Configuration::TracerFactoryContext& context) { auto opts = std::make_unique(); @@ -35,7 +35,7 @@ Tracing::HttpTracerPtr LightstepTracerFactory::createHttpTracerTyped( context.serverFactoryContext().runtime(), std::move(opts), Common::Ot::OpenTracingDriver::PropagationMode::TracerNative, context.serverFactoryContext().grpcContext()); - return std::make_unique(std::move(lightstep_driver), + return std::make_shared(std::move(lightstep_driver), context.serverFactoryContext().localInfo()); } diff --git a/source/extensions/tracers/lightstep/config.h b/source/extensions/tracers/lightstep/config.h index 84e3d91396f5..771986998cd1 100644 --- a/source/extensions/tracers/lightstep/config.h +++ b/source/extensions/tracers/lightstep/config.h @@ -20,7 +20,7 @@ class LightstepTracerFactory private: // FactoryBase - Tracing::HttpTracerPtr + Tracing::HttpTracerSharedPtr createHttpTracerTyped(const envoy::config::trace::v3::LightstepConfig& proto_config, Server::Configuration::TracerFactoryContext& context) override; }; diff --git 
a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc index 2dd99f6f577e..0f4b1a89e378 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc @@ -36,6 +36,34 @@ static Buffer::InstancePtr serializeGrpcMessage(const lightstep::BufferChain& bu return body; } +static std::vector +MakePropagationModes(const envoy::config::trace::v3::LightstepConfig& lightstep_config) { + if (lightstep_config.propagation_modes().empty()) { + return {lightstep::PropagationMode::envoy}; + } + std::vector result; + result.reserve(lightstep_config.propagation_modes().size()); + for (auto propagation_mode : lightstep_config.propagation_modes()) { + switch (propagation_mode) { + case envoy::config::trace::v3::LightstepConfig::ENVOY: + result.push_back(lightstep::PropagationMode::envoy); + break; + case envoy::config::trace::v3::LightstepConfig::LIGHTSTEP: + result.push_back(lightstep::PropagationMode::lightstep); + break; + case envoy::config::trace::v3::LightstepConfig::B3: + result.push_back(lightstep::PropagationMode::b3); + break; + case envoy::config::trace::v3::LightstepConfig::TRACE_CONTEXT: + result.push_back(lightstep::PropagationMode::trace_context); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + return result; +} + void LightStepLogger::operator()(lightstep::LogLevel level, opentracing::string_view message) const { const fmt::string_view fmt_message{message.data(), message.size()}; @@ -60,7 +88,7 @@ void LightStepLogger::operator()(lightstep::LogLevel level, const size_t LightStepDriver::DefaultMinFlushSpans = 200U; LightStepDriver::LightStepTransporter::LightStepTransporter(LightStepDriver& driver) - : driver_(driver) {} + : driver_(driver), collector_cluster_(driver_.clusterManager(), driver_.cluster()) {} LightStepDriver::LightStepTransporter::~LightStepTransporter() { if (active_request_ != nullptr) { @@ 
-68,15 +96,16 @@ LightStepDriver::LightStepTransporter::~LightStepTransporter() { } } -void LightStepDriver::LightStepTransporter::onSuccess(Http::ResponseMessagePtr&& /*response*/) { - driver_.grpc_context_.chargeStat(*driver_.cluster(), driver_.request_names_, true); +void LightStepDriver::LightStepTransporter::onSuccess(const Http::AsyncClient::Request&, + Http::ResponseMessagePtr&& /*response*/) { + driver_.grpc_context_.chargeStat(*active_cluster_, driver_.request_stat_names_, true); active_callback_->OnSuccess(*active_report_); reset(); } void LightStepDriver::LightStepTransporter::onFailure( - Http::AsyncClient::FailureReason /*failure_reason*/) { - driver_.grpc_context_.chargeStat(*driver_.cluster(), driver_.request_names_, false); + const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason /*failure_reason*/) { + driver_.grpc_context_.chargeStat(*active_cluster_, driver_.request_stat_names_, false); active_callback_->OnFailure(*active_report_); reset(); } @@ -94,24 +123,31 @@ void LightStepDriver::LightStepTransporter::Send(std::unique_ptrname(), lightstep::CollectorServiceFullName(), - lightstep::CollectorMethodName(), absl::optional(timeout)); - message->body() = serializeGrpcMessage(*active_report_); - - active_request_ = - driver_.clusterManager() - .httpAsyncClientForCluster(driver_.cluster()->name()) - .send(std::move(message), *this, - Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(timeout))); + driver_.cluster(), lightstep::CollectorServiceFullName(), lightstep::CollectorMethodName(), + absl::optional(timeout)); + message->body() = serializeGrpcMessage(*report); + + if (collector_cluster_.exists()) { + active_report_ = std::move(report); + active_callback_ = &callback; + active_cluster_ = collector_cluster_.info(); + active_request_ = driver_.clusterManager() + .httpAsyncClientForCluster(collector_cluster_.info()->name()) + .send(std::move(message), *this, + Http::AsyncClient::RequestOptions().setTimeout( + 
std::chrono::milliseconds(timeout))); + } else { + ENVOY_LOG(debug, "collector cluster '{}' does not exist", driver_.cluster()); + driver_.tracerStats().reports_skipped_no_cluster_.inc(); + } } void LightStepDriver::LightStepTransporter::reset() { + active_cluster_ = nullptr; active_request_ = nullptr; active_callback_ = nullptr; active_report_ = nullptr; @@ -158,24 +194,28 @@ LightStepDriver::LightStepDriver(const envoy::config::trace::v3::LightstepConfig tracer_stats_{LIGHTSTEP_TRACER_STATS(POOL_COUNTER_PREFIX(scope, "tracing.lightstep."))}, tls_{tls.allocateSlot()}, runtime_{runtime}, options_{std::move(options)}, propagation_mode_{propagation_mode}, grpc_context_(grpc_context), - pool_(scope.symbolTable()), request_names_{pool_.add(lightstep::CollectorServiceFullName()), - pool_.add(lightstep::CollectorMethodName())} { + pool_(scope.symbolTable()), request_stat_names_{ + pool_.add(lightstep::CollectorServiceFullName()), + pool_.add(lightstep::CollectorMethodName())} { Config::Utility::checkCluster(TracerNames::get().Lightstep, lightstep_config.collector_cluster(), - cm_); - cluster_ = cm_.get(lightstep_config.collector_cluster())->info(); + cm_, /* allow_added_via_api */ true); + cluster_ = lightstep_config.collector_cluster(); - if (!(cluster_->features() & Upstream::ClusterInfo::Features::HTTP2)) { + if (!(cm_.get(cluster_)->info()->features() & Upstream::ClusterInfo::Features::HTTP2)) { throw EnvoyException( - fmt::format("{} collector cluster must support http2 for gRPC calls", cluster_->name())); + fmt::format("{} collector cluster must support http2 for gRPC calls", cluster_)); } - tls_->set([this](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { + auto propagation_modes = MakePropagationModes(lightstep_config); + + tls_->set([this, propagation_modes = std::move(propagation_modes)]( + Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { lightstep::LightStepTracerOptions tls_options; 
tls_options.access_token = options_->access_token; tls_options.component_name = options_->component_name; tls_options.use_thread = false; - tls_options.use_single_key_propagation = true; + tls_options.propagation_modes = propagation_modes; tls_options.logger_sink = LightStepLogger{}; tls_options.max_buffered_spans = std::function{[this] { diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.h b/source/extensions/tracers/lightstep/lightstep_tracer_impl.h index 41ec6c944dd3..ee572f47f6eb 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.h +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.h @@ -17,6 +17,7 @@ #include "common/json/json_loader.h" #include "common/protobuf/protobuf.h" #include "common/stats/symbol_table_impl.h" +#include "common/upstream/cluster_update_tracker.h" #include "extensions/tracers/common/ot/opentracing_driver_impl.h" @@ -33,7 +34,8 @@ namespace Lightstep { #define LIGHTSTEP_TRACER_STATS(COUNTER) \ COUNTER(spans_sent) \ COUNTER(spans_dropped) \ - COUNTER(timer_flushed) + COUNTER(timer_flushed) \ + COUNTER(reports_skipped_no_cluster) struct LightstepTracerStats { LIGHTSTEP_TRACER_STATS(GENERATE_COUNTER_STRUCT) @@ -62,7 +64,7 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { PropagationMode propagation_mode, Grpc::Context& grpc_context); Upstream::ClusterManager& clusterManager() { return cm_; } - Upstream::ClusterInfoConstSharedPtr cluster() { return cluster_; } + const std::string& cluster() { return cluster_; } Runtime::Loader& runtime() { return runtime_; } LightstepTracerStats& tracerStats() { return tracer_stats_; } @@ -75,7 +77,9 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { PropagationMode propagationMode() const override { return propagation_mode_; } private: - class LightStepTransporter : public lightstep::AsyncTransporter, Http::AsyncClient::Callbacks { + class LightStepTransporter : Logger::Loggable, + public lightstep::AsyncTransporter, + public 
Http::AsyncClient::Callbacks { public: explicit LightStepTransporter(LightStepDriver& driver); @@ -88,14 +92,17 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { Callback& callback) noexcept override; // Http::AsyncClient::Callbacks - void onSuccess(Http::ResponseMessagePtr&& response) override; - void onFailure(Http::AsyncClient::FailureReason failure_reason) override; + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason failure_reason) override; private: std::unique_ptr active_report_; Callback* active_callback_ = nullptr; + Upstream::ClusterInfoConstSharedPtr active_cluster_; Http::AsyncClient::Request* active_request_ = nullptr; LightStepDriver& driver_; + Upstream::ClusterUpdateTracker collector_cluster_; void reset(); }; @@ -128,7 +135,7 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { }; Upstream::ClusterManager& cm_; - Upstream::ClusterInfoConstSharedPtr cluster_; + std::string cluster_; LightstepTracerStats tracer_stats_; ThreadLocal::SlotPtr tls_; Runtime::Loader& runtime_; @@ -136,7 +143,7 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { const PropagationMode propagation_mode_; Grpc::Context& grpc_context_; Stats::StatNamePool pool_; - const Grpc::Context::RequestNames request_names_; + const Grpc::Context::RequestStatNames request_stat_names_; }; } // namespace Lightstep } // namespace Tracers diff --git a/source/extensions/tracers/opencensus/config.cc b/source/extensions/tracers/opencensus/config.cc index a593d7504428..27f90ad21069 100644 --- a/source/extensions/tracers/opencensus/config.cc +++ b/source/extensions/tracers/opencensus/config.cc @@ -16,14 +16,25 @@ namespace OpenCensus { OpenCensusTracerFactory::OpenCensusTracerFactory() : FactoryBase(TracerNames::get().OpenCensus) {} -Tracing::HttpTracerPtr OpenCensusTracerFactory::createHttpTracerTyped( 
+Tracing::HttpTracerSharedPtr OpenCensusTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::OpenCensusConfig& proto_config, Server::Configuration::TracerFactoryContext& context) { + // Since OpenCensus can only support a single tracing configuration per entire process, + // we need to make sure that it is configured at most once. + if (tracer_) { + if (Envoy::Protobuf::util::MessageDifferencer::Equals(config_, proto_config)) { + return tracer_; + } else { + throw EnvoyException("Opencensus has already been configured with a different config."); + } + } Tracing::DriverPtr driver = std::make_unique(proto_config, context.serverFactoryContext().localInfo(), context.serverFactoryContext().api()); - return std::make_unique(std::move(driver), - context.serverFactoryContext().localInfo()); + tracer_ = std::make_shared(std::move(driver), + context.serverFactoryContext().localInfo()); + config_ = proto_config; + return tracer_; } /** diff --git a/source/extensions/tracers/opencensus/config.h b/source/extensions/tracers/opencensus/config.h index ac35cec54388..14fcf0176ae6 100644 --- a/source/extensions/tracers/opencensus/config.h +++ b/source/extensions/tracers/opencensus/config.h @@ -22,9 +22,14 @@ class OpenCensusTracerFactory private: // FactoryBase - Tracing::HttpTracerPtr + Tracing::HttpTracerSharedPtr createHttpTracerTyped(const envoy::config::trace::v3::OpenCensusConfig& proto_config, Server::Configuration::TracerFactoryContext& context) override; + + // Since OpenCensus can only support a single tracing configuration per entire process, + // we need to make sure that it is configured at most once. 
+ Tracing::HttpTracerSharedPtr tracer_; + envoy::config::trace::v3::OpenCensusConfig config_; }; } // namespace OpenCensus diff --git a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index 1e499821c204..00a13fb0e69c 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -241,6 +241,17 @@ void Span::setSampled(bool sampled) { span_.AddAnnotation("setSampled", {{"sampl Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, const LocalInfo::LocalInfo& localinfo, Api::Api& api) : oc_config_(oc_config), local_info_(localinfo) { + // To give user a chance to correct initially invalid configuration and try to apply it once again + // without a need to restart Envoy, validation checks must be done prior to any side effects. + if (oc_config.stackdriver_exporter_enabled() && oc_config.has_stackdriver_grpc_service() && + !oc_config.stackdriver_grpc_service().has_google_grpc()) { + throw EnvoyException("Opencensus stackdriver tracer only support GoogleGrpc."); + } + if (oc_config.ocagent_exporter_enabled() && oc_config.has_ocagent_grpc_service() && + !oc_config.ocagent_grpc_service().has_google_grpc()) { + throw EnvoyException("Opencensus ocagent tracer only supports GoogleGrpc."); + } + // Process-wide side effects. 
if (oc_config.has_trace_config()) { applyTraceConfig(oc_config.trace_config()); } @@ -254,10 +265,8 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, auto channel = grpc::CreateChannel(oc_config.stackdriver_address(), grpc::InsecureChannelCredentials()); opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel); - } else if (oc_config.has_stackdriver_grpc_service()) { - if (!oc_config.stackdriver_grpc_service().has_google_grpc()) { - throw EnvoyException("Opencensus stackdriver tracer only support GoogleGrpc."); - } + } else if (oc_config.has_stackdriver_grpc_service() && + oc_config.stackdriver_grpc_service().has_google_grpc()) { envoy::config::core::v3::GrpcService stackdriver_service = oc_config.stackdriver_grpc_service(); if (stackdriver_service.google_grpc().target_uri().empty()) { @@ -279,11 +288,10 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, ::opencensus::exporters::trace::OcAgentOptions opts; if (!oc_config.ocagent_address().empty()) { opts.address = oc_config.ocagent_address(); - } else if (oc_config.has_ocagent_grpc_service()) { - if (!oc_config.ocagent_grpc_service().has_google_grpc()) { - throw EnvoyException("Opencensus ocagent tracer only supports GoogleGrpc."); - } - envoy::config::core::v3::GrpcService ocagent_service = oc_config.ocagent_grpc_service(); + } else if (oc_config.has_ocagent_grpc_service() && + oc_config.ocagent_grpc_service().has_google_grpc()) { + const envoy::config::core::v3::GrpcService& ocagent_service = + oc_config.ocagent_grpc_service(); auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(ocagent_service, api); opts.trace_service_stub = ::opencensus::proto::agent::trace::v1::TraceService::NewStub(channel); diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index 95a6d54737ee..f225797780d3 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ 
-47,6 +47,7 @@ envoy_cc_library( "//source/common/http:header_map_lib", "//source/common/json:json_loader_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_lib", "//source/common/tracing:http_tracer_lib", ], ) diff --git a/source/extensions/tracers/xray/config.cc b/source/extensions/tracers/xray/config.cc index aa35147aa6a5..ad4bfc0ebcfe 100644 --- a/source/extensions/tracers/xray/config.cc +++ b/source/extensions/tracers/xray/config.cc @@ -21,7 +21,7 @@ namespace XRay { XRayTracerFactory::XRayTracerFactory() : FactoryBase(TracerNames::get().XRay) {} -Tracing::HttpTracerPtr +Tracing::HttpTracerSharedPtr XRayTracerFactory::createHttpTracerTyped(const envoy::config::trace::v3::XRayConfig& proto_config, Server::Configuration::TracerFactoryContext& context) { std::string sampling_rules_json; @@ -47,7 +47,7 @@ XRayTracerFactory::createHttpTracerTyped(const envoy::config::trace::v3::XRayCon XRayConfiguration xconfig{endpoint, proto_config.segment_name(), sampling_rules_json}; auto xray_driver = std::make_unique(xconfig, context); - return std::make_unique(std::move(xray_driver), + return std::make_shared(std::move(xray_driver), context.serverFactoryContext().localInfo()); } diff --git a/source/extensions/tracers/xray/config.h b/source/extensions/tracers/xray/config.h index 8813f8c8be99..d8634ccc0291 100644 --- a/source/extensions/tracers/xray/config.h +++ b/source/extensions/tracers/xray/config.h @@ -21,7 +21,7 @@ class XRayTracerFactory : public Common::FactoryBasemutable_response()->insert(KeyValue{item.first, item.second}); } - // TODO(marcomagdy): test how expensive this validation is. Might be worth turning off in - // optimized builds.. 
- MessageUtil::validate(s, ProtobufMessage::getStrictValidationVisitor()); - Protobuf::util::JsonPrintOptions json_options; - json_options.preserve_proto_field_names = true; - std::string json; - const auto status = Protobuf::util::MessageToJsonString(s, &json, json_options); - ASSERT(status.ok()); + const std::string json = MessageUtil::getJsonStringFromMessage( + s, false /* pretty_print */, false /* always_print_primitive_fields */); + broker_.send(json); } // namespace XRay void Span::injectContext(Http::RequestHeaderMap& request_headers) { const std::string xray_header_value = - fmt::format("root={};parent={};sampled={}", traceId(), Id(), sampled() ? "1" : "0"); - - // Set the XRay header into envoy header map for visibility to upstream - request_headers.addCopy(Http::LowerCaseString(XRayTraceHeader), xray_header_value); + fmt::format("Root={};Parent={};Sampled={}", traceId(), Id(), sampled() ? "1" : "0"); + request_headers.setCopy(Http::LowerCaseString(XRayTraceHeader), xray_header_value); } Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& operation_name, @@ -113,7 +106,7 @@ Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& ope auto child_span = std::make_unique(time_source_, broker_); const auto ticks = time_source_.monotonicTime().time_since_epoch().count(); child_span->setId(ticks); - child_span->setName(operation_name); + child_span->setName(name()); child_span->setOperation(operation_name); child_span->setStartTime(start_time); child_span->setParentId(Id()); diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index c65a2aad93c6..d0ecb0684e25 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -52,12 +52,8 @@ Driver::Driver(const XRayConfiguration& config, tls_slot_ptr_->set([this, daemon_endpoint, &context](Event::Dispatcher&) -> 
ThreadLocal::ThreadLocalObjectSharedPtr { - std::string span_name = xray_config_.segment_name_.empty() - ? context.serverFactoryContext().localInfo().clusterName() - : xray_config_.segment_name_; - DaemonBrokerPtr broker = std::make_unique(daemon_endpoint); - TracerPtr tracer = std::make_unique(span_name, std::move(broker), + TracerPtr tracer = std::make_unique(xray_config_.segment_name_, std::move(broker), context.serverFactoryContext().timeSource()); return std::make_shared(std::move(tracer), *this); }); diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index 5ae922000017..f2321bab8710 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( "//source/common/common:hex_lib", "//source/common/common:utility_lib", "//source/common/config:utility_lib", + "//source/common/http:async_client_utility_lib", "//source/common/http:header_map_lib", "//source/common/http:message_lib", "//source/common/http:utility_lib", @@ -56,6 +57,7 @@ envoy_cc_library( "//source/common/network:address_lib", "//source/common/singleton:const_singleton", "//source/common/tracing:http_tracer_lib", + "//source/common/upstream:cluster_update_tracker_lib", "//source/extensions/tracers:well_known_names", "@com_github_openzipkin_zipkinapi//:zipkin_cc_proto", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/source/extensions/tracers/zipkin/config.cc b/source/extensions/tracers/zipkin/config.cc index c3d1c3ed7e60..663b8f950d04 100644 --- a/source/extensions/tracers/zipkin/config.cc +++ b/source/extensions/tracers/zipkin/config.cc @@ -17,7 +17,7 @@ namespace Zipkin { ZipkinTracerFactory::ZipkinTracerFactory() : FactoryBase(TracerNames::get().Zipkin) {} -Tracing::HttpTracerPtr ZipkinTracerFactory::createHttpTracerTyped( +Tracing::HttpTracerSharedPtr ZipkinTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::ZipkinConfig& proto_config, 
Server::Configuration::TracerFactoryContext& context) { Tracing::DriverPtr zipkin_driver = std::make_unique( @@ -26,7 +26,7 @@ Tracing::HttpTracerPtr ZipkinTracerFactory::createHttpTracerTyped( context.serverFactoryContext().runtime(), context.serverFactoryContext().localInfo(), context.serverFactoryContext().random(), context.serverFactoryContext().timeSource()); - return std::make_unique(std::move(zipkin_driver), + return std::make_shared(std::move(zipkin_driver), context.serverFactoryContext().localInfo()); } diff --git a/source/extensions/tracers/zipkin/config.h b/source/extensions/tracers/zipkin/config.h index 4907bd196c75..7ae4337a07aa 100644 --- a/source/extensions/tracers/zipkin/config.h +++ b/source/extensions/tracers/zipkin/config.h @@ -19,7 +19,7 @@ class ZipkinTracerFactory : public Common::FactoryBaseinfo(); + Config::Utility::checkCluster(TracerNames::get().Zipkin, zipkin_config.collector_cluster(), cm_, + /* allow_added_via_api */ true); + cluster_ = zipkin_config.collector_cluster(); CollectorInfo collector; if (!zipkin_config.collector_endpoint().empty()) { @@ -133,7 +134,8 @@ ReporterImpl::ReporterImpl(Driver& driver, Event::Dispatcher& dispatcher, const CollectorInfo& collector) : driver_(driver), collector_(collector), span_buffer_{std::make_unique( - collector.version_, collector.shared_span_context_)} { + collector.version_, collector.shared_span_context_)}, + collector_cluster_(driver_.clusterManager(), driver_.cluster()) { flush_timer_ = dispatcher.createTimer([this]() -> void { driver_.tracerStats().timer_flushed_.inc(); flushSpans(); @@ -176,7 +178,7 @@ void ReporterImpl::flushSpans() { Http::RequestMessagePtr message = std::make_unique(); message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); message->headers().setPath(collector_.endpoint_); - message->headers().setHost(driver_.cluster()->name()); + message->headers().setHost(driver_.cluster()); message->headers().setReferenceContentType( collector_.version_ == 
envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO ? Http::Headers::get().ContentTypeValues.Protobuf @@ -188,20 +190,35 @@ void ReporterImpl::flushSpans() { const uint64_t timeout = driver_.runtime().snapshot().getInteger("tracing.zipkin.request_timeout", 5000U); - driver_.clusterManager() - .httpAsyncClientForCluster(driver_.cluster()->name()) - .send(std::move(message), *this, - Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(timeout))); + + if (collector_cluster_.exists()) { + Http::AsyncClient::Request* request = + driver_.clusterManager() + .httpAsyncClientForCluster(collector_cluster_.info()->name()) + .send(std::move(message), *this, + Http::AsyncClient::RequestOptions().setTimeout( + std::chrono::milliseconds(timeout))); + if (request) { + active_requests_.add(*request); + } + } else { + ENVOY_LOG(debug, "collector cluster '{}' does not exist", driver_.cluster()); + driver_.tracerStats().reports_skipped_no_cluster_.inc(); + } span_buffer_->clear(); } } -void ReporterImpl::onFailure(Http::AsyncClient::FailureReason) { +void ReporterImpl::onFailure(const Http::AsyncClient::Request& request, + Http::AsyncClient::FailureReason) { + active_requests_.remove(request); driver_.tracerStats().reports_failed_.inc(); } -void ReporterImpl::onSuccess(Http::ResponseMessagePtr&& http_response) { +void ReporterImpl::onSuccess(const Http::AsyncClient::Request& request, + Http::ResponseMessagePtr&& http_response) { + active_requests_.remove(request); if (Http::Utility::getResponseStatus(http_response->headers()) != enumToInt(Http::Code::Accepted)) { driver_.tracerStats().reports_dropped_.inc(); diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 7c8881d23b9a..36866fd52b9e 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -7,8 +7,10 @@ #include "envoy/tracing/http_tracer.h" #include 
"envoy/upstream/cluster_manager.h" +#include "common/http/async_client_utility.h" #include "common/http/header_map_impl.h" #include "common/json/json_loader.h" +#include "common/upstream/cluster_update_tracker.h" #include "extensions/tracers/zipkin/span_buffer.h" #include "extensions/tracers/zipkin/tracer.h" @@ -22,6 +24,7 @@ namespace Zipkin { #define ZIPKIN_TRACER_STATS(COUNTER) \ COUNTER(spans_sent) \ COUNTER(timer_flushed) \ + COUNTER(reports_skipped_no_cluster) \ COUNTER(reports_sent) \ COUNTER(reports_dropped) \ COUNTER(reports_failed) @@ -115,7 +118,7 @@ class Driver : public Tracing::Driver { // Getters to return the ZipkinDriver's key members. Upstream::ClusterManager& clusterManager() { return cm_; } - Upstream::ClusterInfoConstSharedPtr cluster() { return cluster_; } + const std::string& cluster() { return cluster_; } Runtime::Loader& runtime() { return runtime_; } ZipkinTracerStats& tracerStats() { return tracer_stats_; } @@ -131,7 +134,7 @@ class Driver : public Tracing::Driver { }; Upstream::ClusterManager& cm_; - Upstream::ClusterInfoConstSharedPtr cluster_; + std::string cluster_; ZipkinTracerStats tracer_stats_; ThreadLocal::SlotPtr tls_; Runtime::Loader& runtime_; @@ -170,7 +173,9 @@ struct CollectorInfo { * * The default values for the runtime parameters are 5 spans and 5000ms. */ -class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { +class ReporterImpl : Logger::Loggable, + public Reporter, + public Http::AsyncClient::Callbacks { public: /** * Constructor. @@ -194,8 +199,8 @@ class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { // Http::AsyncClient::Callbacks. // The callbacks below record Zipkin-span-related stats. 
- void onSuccess(Http::ResponseMessagePtr&&) override; - void onFailure(Http::AsyncClient::FailureReason) override; + void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override; + void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override; /** * Creates a heap-allocated ZipkinReporter. @@ -226,6 +231,9 @@ class ReporterImpl : public Reporter, Http::AsyncClient::Callbacks { Event::TimerPtr flush_timer_; const CollectorInfo collector_; SpanBufferPtr span_buffer_; + Upstream::ClusterUpdateTracker collector_cluster_; + // Track active HTTP requests to be able to cancel them on destruction. + Http::AsyncClientRequestTracker active_requests_; }; } // namespace Zipkin } // namespace Tracers diff --git a/source/extensions/transport_sockets/tls/context_config_impl.cc b/source/extensions/transport_sockets/tls/context_config_impl.cc index 36bbb867c33e..793adc7597de 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.cc +++ b/source/extensions/transport_sockets/tls/context_config_impl.cc @@ -137,6 +137,8 @@ Secret::TlsSessionTicketKeysConfigProviderSharedPtr getTlsSessionTicketKeysConfi return secret_provider; } } + case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext:: + SessionTicketKeysTypeCase::kDisableStatelessSessionResumption: case envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext:: SessionTicketKeysTypeCase::SESSION_TICKET_KEYS_TYPE_NOT_SET: return nullptr; @@ -146,6 +148,17 @@ Secret::TlsSessionTicketKeysConfigProviderSharedPtr getTlsSessionTicketKeysConfi } } +bool getStatelessSessionResumptionDisabled( + const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext& config) { + if (config.session_ticket_keys_type_case() == + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext:: + SessionTicketKeysTypeCase::kDisableStatelessSessionResumption) { + return config.disable_stateless_session_resumption(); + } else { + return 
false; + } +} + } // namespace ContextConfigImpl::ContextConfigImpl( @@ -379,8 +392,9 @@ ServerContextConfigImpl::ServerContextConfigImpl( DEFAULT_CIPHER_SUITES, DEFAULT_CURVES, factory_context), require_client_certificate_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, require_client_certificate, false)), - session_ticket_keys_provider_( - getTlsSessionTicketKeysConfigProvider(factory_context, config)) { + session_ticket_keys_provider_(getTlsSessionTicketKeysConfigProvider(factory_context, config)), + disable_stateless_session_resumption_(getStatelessSessionResumptionDisabled(config)) { + if (session_ticket_keys_provider_ != nullptr) { // Validate tls session ticket keys early to reject bad sds updates. stk_validation_callback_handle_ = session_ticket_keys_provider_->addValidationCallback( diff --git a/source/extensions/transport_sockets/tls/context_config_impl.h b/source/extensions/transport_sockets/tls/context_config_impl.h index 1abfe488fee2..9cfaff0482fb 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.h +++ b/source/extensions/transport_sockets/tls/context_config_impl.h @@ -147,6 +147,9 @@ class ServerContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Ser } void setSecretUpdateCallback(std::function callback) override; + bool disableStatelessSessionResumption() const override { + return disable_stateless_session_resumption_; + } private: static const unsigned DEFAULT_MIN_VERSION; @@ -165,6 +168,7 @@ class ServerContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Ser ServerContextConfig::SessionTicketKey getSessionTicketKey(const std::string& key_data); absl::optional session_timeout_; + const bool disable_stateless_session_resumption_; }; } // namespace Tls diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 6e68385d73e4..7292bba9b005 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ 
b/source/extensions/transport_sockets/tls/context_impl.cc @@ -980,7 +980,9 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, this); } - if (!session_ticket_keys_.empty()) { + if (config.disableStatelessSessionResumption()) { + SSL_CTX_set_options(ctx.ssl_ctx_.get(), SSL_OP_NO_TICKET); + } else if (!session_ticket_keys_.empty()) { SSL_CTX_set_tlsext_ticket_key_cb( ctx.ssl_ctx_.get(), [](SSL* ssl, uint8_t* key_name, uint8_t* iv, EVP_CIPHER_CTX* ctx, HMAC_CTX* hmac_ctx, diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index d2b0925c041c..1e3082f80653 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -152,8 +152,7 @@ Network::IoResult SslSocket::doRead(Buffer::Instance& read_buffer) { } } - ENVOY_CONN_LOG(trace, "ssl read {} bytes into {} slices", callbacks_->connection(), bytes_read, - read_buffer.getRawSlices(nullptr, 0)); + ENVOY_CONN_LOG(trace, "ssl read {} bytes", callbacks_->connection(), bytes_read); return {action, bytes_read, end_stream}; } diff --git a/source/extensions/wasm/BUILD b/source/extensions/wasm/BUILD index 5995fdb07d66..6c226c261000 100644 --- a/source/extensions/wasm/BUILD +++ b/source/extensions/wasm/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + # security_posture = "unknown", docs requires it, bazel fails if it is included. 
deps = [ "//include/envoy/registry", "//include/envoy/server:wasm_config_interface", @@ -23,6 +24,6 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/extensions/common/wasm:wasm_lib", "//source/extensions/grpc_credentials:well_known_names", - "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@envoy_api//envoy/config/wasm/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/wasm/config.cc b/source/extensions/wasm/config.cc index 865bd3885591..43d078f26ba5 100644 --- a/source/extensions/wasm/config.cc +++ b/source/extensions/wasm/config.cc @@ -16,7 +16,7 @@ namespace Wasm { static const std::string INLINE_STRING = ""; -void WasmFactory::createWasm(const envoy::extensions::wasm::v3::WasmService& config, +void WasmFactory::createWasm(const envoy::config::wasm::v3::WasmService& config, Server::Configuration::WasmFactoryContext& context, Server::CreateWasmServiceCallback&& cb) { auto plugin = std::make_shared( diff --git a/source/extensions/wasm/config.h b/source/extensions/wasm/config.h index 0b2039e14066..27dcab6138f4 100644 --- a/source/extensions/wasm/config.h +++ b/source/extensions/wasm/config.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/config/wasm/v3/wasm.pb.validate.h" #include "envoy/server/wasm_config.h" #include "extensions/common/wasm/wasm.h" @@ -13,7 +13,7 @@ class WasmFactory : public Server::Configuration::WasmFactory { public: ~WasmFactory() override {} std::string name() override { return "envoy.wasm"; } - void createWasm(const envoy::extensions::wasm::v3::WasmService& config, + void createWasm(const envoy::config::wasm::v3::WasmService& config, Server::Configuration::WasmFactoryContext& context, Server::CreateWasmServiceCallback&& cb) override; diff --git a/source/server/BUILD b/source/server/BUILD index 37630ebb938e..85d2f09f1626 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -52,7 +52,7 @@ envoy_cc_library( 
"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@envoy_api//envoy/config/wasm/v3:pkg_cc_proto", ], ) @@ -77,6 +77,7 @@ envoy_cc_library( "//source/common/common:non_copyable", "//source/common/network:connection_lib", "//source/common/stats:timespan_lib", + "//source/common/stream_info:stream_info_lib", "//source/extensions/transport_sockets:well_known_names", ], ) @@ -255,14 +256,13 @@ envoy_cc_library( srcs = ["lds_api.cc"], hdrs = ["lds_api.h"], deps = [ - "//include/envoy/config:discovery_service_base_interface", "//include/envoy/config:subscription_factory_interface", "//include/envoy/config:subscription_interface", "//include/envoy/init:manager_interface", "//include/envoy/server:listener_manager_interface", "//source/common/common:cleanup_lib", "//source/common/config:api_version_lib", - "//source/common/config:resources_lib", + "//source/common/config:subscription_base_interface", "//source/common/config:utility_lib", "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", @@ -318,10 +318,12 @@ envoy_cc_library( ":listener_manager_impl", ":transport_socket_config_lib", ":well_known_names_lib", + "//include/envoy/access_log:access_log_interface", "//include/envoy/server:active_udp_listener_config_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/server:listener_manager_interface", "//include/envoy/server:transport_socket_config_interface", + "//source/common/access_log:access_log_lib", "//source/common/config:utility_lib", "//source/common/init:manager_lib", "//source/common/init:target_lib", @@ -366,6 +368,7 @@ envoy_cc_library( "//source/common/config:version_converter_lib", "//source/common/http:conn_manager_lib", "//source/common/init:manager_lib", + "//source/common/network:filter_matcher_lib", "//source/common/network:listen_socket_lib", 
"//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index b2c8532a6cd4..a75d13694ad4 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -12,7 +12,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/trace/v3/trace.pb.h" #include "envoy/config/typed_config.h" -#include "envoy/extensions/wasm/v3/wasm.pb.h" +#include "envoy/config/wasm/v3/wasm.pb.h" #include "envoy/http/filter.h" #include "envoy/network/filter.h" #include "envoy/server/configuration.h" @@ -34,7 +34,7 @@ namespace Configuration { */ class StatsSinkFactory : public Config::TypedFactory { public: - virtual ~StatsSinkFactory() = default; + ~StatsSinkFactory() override = default; /** * Create a particular Stats::Sink implementation. If the implementation is unable to produce a diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index fec70ae19b4a..83a1563f3dd5 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -39,7 +39,7 @@ void ConnectionHandlerImpl::addListener(Network::ListenerConfig& config) { config.udpListenerFactory()->createActiveUdpListener(*this, dispatcher_, config); } if (disable_listeners_) { - details.listener_->listener()->disable(); + details.listener_->pauseListening(); } listeners_.emplace_back(config.listenSocketFactory().localAddress(), std::move(details)); } @@ -57,28 +57,28 @@ void ConnectionHandlerImpl::removeListeners(uint64_t listener_tag) { void ConnectionHandlerImpl::stopListeners(uint64_t listener_tag) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == listener_tag) { - listener.second.listener_->destroy(); + listener.second.listener_->shutdownListener(); } } } void ConnectionHandlerImpl::stopListeners() { for (auto& listener : listeners_) { - 
listener.second.listener_->destroy(); + listener.second.listener_->shutdownListener(); } } void ConnectionHandlerImpl::disableListeners() { disable_listeners_ = true; for (auto& listener : listeners_) { - listener.second.listener_->listener()->disable(); + listener.second.listener_->pauseListening(); } } void ConnectionHandlerImpl::enableListeners() { disable_listeners_ = false; for (auto& listener : listeners_) { - listener.second.listener_->listener()->enable(); + listener.second.listener_->resumeListening(); } } @@ -322,23 +322,42 @@ void ConnectionHandlerImpl::ActiveTcpListener::onAcceptWorker( } } +namespace { +void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_info) { + stream_info.onRequestComplete(); + for (const auto& access_log : config.accessLogs()) { + access_log->log(nullptr, nullptr, nullptr, stream_info); + } +} +} // namespace + void ConnectionHandlerImpl::ActiveTcpListener::newConnection( Network::ConnectionSocketPtr&& socket) { + auto stream_info = std::make_unique(parent_.dispatcher_.timeSource()); + stream_info->setDownstreamLocalAddress(socket->localAddress()); + stream_info->setDownstreamRemoteAddress(socket->remoteAddress()); + stream_info->setDownstreamDirectRemoteAddress(socket->directRemoteAddress()); + // Find matching filter chain. 
const auto filter_chain = config_.filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { ENVOY_LOG(debug, "closing connection: no matching filter chain found"); stats_.no_filter_chain_match_.inc(); + stream_info->setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); + stream_info->setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); + emitLogs(config_, *stream_info); socket->close(); return; } auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + stream_info->setDownstreamSslConnection(transport_socket->ssl()); auto& active_connections = getOrCreateActiveConnections(*filter_chain); - ActiveTcpConnectionPtr active_connection(new ActiveTcpConnection( - active_connections, - parent_.dispatcher_.createServerConnection(std::move(socket), std::move(transport_socket)), - parent_.dispatcher_.timeSource())); + auto server_conn_ptr = parent_.dispatcher_.createServerConnection( + std::move(socket), std::move(transport_socket), *stream_info); + ActiveTcpConnectionPtr active_connection( + new ActiveTcpConnection(active_connections, std::move(server_conn_ptr), + parent_.dispatcher_.timeSource(), config_, std::move(stream_info))); active_connection->connection_->setBufferLimits(config_.perConnectionBufferLimitBytes()); const bool empty_filter_chain = !config_.filterChainFactory().createNetworkFilterChain( @@ -414,10 +433,13 @@ ConnectionHandlerImpl::ActiveConnections::~ActiveConnections() { ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection, - TimeSource& time_source) - : active_connections_(active_connections), connection_(std::move(new_connection)), + TimeSource& time_source, Network::ListenerConfig& config, + std::unique_ptr&& stream_info) + : stream_info_(std::move(stream_info)), active_connections_(active_connections), + connection_(std::move(new_connection)), conn_length_(new 
Stats::HistogramCompletableTimespanImpl( - active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)) { + active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)), + config_(config) { // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. connection_->noDelay(true); @@ -434,6 +456,8 @@ ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( } ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { + emitLogs(config_, *stream_info_); + active_connections_.listener_.stats_.downstream_cx_active_.dec(); active_connections_.listener_.stats_.downstream_cx_destroy_.inc(); active_connections_.listener_.per_worker_stats_.downstream_cx_active_.dec(); diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 10a0d7340d4f..3a8812ad74b3 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -16,6 +16,7 @@ #include "envoy/server/listener_manager.h" #include "envoy/stats/scope.h" #include "envoy/stats/timespan.h" +#include "envoy/stream_info/stream_info.h" #include "common/common/linked_object.h" #include "common/common/non_copyable.h" @@ -120,7 +121,9 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, // ActiveListenerImplBase Network::Listener* listener() override { return listener_.get(); } - void destroy() override { listener_.reset(); } + void pauseListening() override { listener_->disable(); } + void resumeListening() override { listener_->enable(); } + void shutdownListener() override { listener_.reset(); } // Network::BalancedConnectionHandler uint64_t numConnections() const override { return num_listener_connections_; } @@ -159,7 +162,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, class ActiveConnections : public Event::DeferredDeletable { public: ActiveConnections(ActiveTcpListener& listener, const 
Network::FilterChain& filter_chain); - ~ActiveConnections(); + ~ActiveConnections() override; // listener filter chain pair is the owner of the connections ActiveTcpListener& listener_; @@ -175,7 +178,9 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, public Event::DeferredDeletable, public Network::ConnectionCallbacks { ActiveTcpConnection(ActiveConnections& active_connections, - Network::ConnectionPtr&& new_connection, TimeSource& time_system); + Network::ConnectionPtr&& new_connection, TimeSource& time_system, + Network::ListenerConfig& config, + std::unique_ptr&& stream_info); ~ActiveTcpConnection() override; // Network::ConnectionCallbacks @@ -189,9 +194,11 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} + std::unique_ptr stream_info_; ActiveConnections& active_connections_; Network::ConnectionPtr connection_; Stats::TimespanPtr conn_length_; + Network::ListenerConfig& config_; }; /** @@ -229,9 +236,40 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, void unlink(); void newConnection(); + class GenericListenerFilter : public Network::ListenerFilter { + public: + GenericListenerFilter(const Network::ListenerFilterMatcherSharedPtr& matcher, + Network::ListenerFilterPtr listener_filter) + : listener_filter_(std::move(listener_filter)), matcher_(std::move(matcher)) {} + Network::FilterStatus onAccept(ListenerFilterCallbacks& cb) override { + if (isDisabled(cb)) { + return Network::FilterStatus::Continue; + } + return listener_filter_->onAccept(cb); + } + /** + * Check if this filter filter should be disabled on the incoming socket. + * @param cb the callbacks the filter instance can use to communicate with the filter chain. 
+ **/ + bool isDisabled(ListenerFilterCallbacks& cb) { + if (matcher_ == nullptr) { + return false; + } else { + return matcher_->matches(cb); + } + } + + private: + const Network::ListenerFilterPtr listener_filter_; + const Network::ListenerFilterMatcherSharedPtr matcher_; + }; + using ListenerFilterWrapperPtr = std::unique_ptr; + // Network::ListenerFilterManager - void addAcceptFilter(Network::ListenerFilterPtr&& filter) override { - accept_filters_.emplace_back(std::move(filter)); + void addAcceptFilter(const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Network::ListenerFilterPtr&& filter) override { + accept_filters_.emplace_back( + std::make_unique(listener_filter_matcher, std::move(filter))); } // Network::ListenerFilterCallbacks @@ -242,8 +280,8 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, ActiveTcpListener& listener_; Network::ConnectionSocketPtr socket_; const bool hand_off_restored_destination_connections_; - std::list accept_filters_; - std::list::iterator iter_; + std::list accept_filters_; + std::list::iterator iter_; Event::TimerPtr timer_; }; @@ -287,7 +325,9 @@ class ActiveUdpListener : public Network::UdpListenerCallbacks, // ActiveListenerImplBase Network::Listener* listener() override { return udp_listener_.get(); } - void destroy() override { udp_listener_.reset(); } + void pauseListening() override { udp_listener_->disable(); } + void resumeListening() override { udp_listener_->enable(); } + void shutdownListener() override { udp_listener_.reset(); } // Network::UdpListenerFilterManager void addReadFilter(Network::UdpListenerReadFilterPtr&& filter) override; diff --git a/source/server/filter_chain_factory_context_callback.h b/source/server/filter_chain_factory_context_callback.h index fdb0cf3d2264..1230bfe5c7e1 100644 --- a/source/server/filter_chain_factory_context_callback.h +++ b/source/server/filter_chain_factory_context_callback.h @@ -21,7 +21,7 @@ class FilterChainFactoryContextCreator { * 
Generate the filter chain factory context from proto. Note the caller does not own the filter * chain context. */ - virtual Configuration::FilterChainFactoryContext& createFilterChainFactoryContext( + virtual std::unique_ptr createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) PURE; }; diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 4e00a584b641..8b8345f64cc4 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -2,6 +2,7 @@ #include "envoy/config/listener/v3/listener_components.pb.h" +#include "common/common/cleanup.h" #include "common/common/empty_string.h" #include "common/common/fmt.h" #include "common/config/utility.h" @@ -25,115 +26,119 @@ Network::Address::InstanceConstSharedPtr fakeAddress() { } // namespace -FilterChainFactoryContextImpl::FilterChainFactoryContextImpl( - Configuration::FactoryContext& parent_context) - : parent_context_(parent_context) {} +PerFilterChainFactoryContextImpl::PerFilterChainFactoryContextImpl( + Configuration::FactoryContext& parent_context, Init::Manager& init_manager) + : parent_context_(parent_context), init_manager_(init_manager) {} -bool FilterChainFactoryContextImpl::drainClose() const { - // TODO(lambdai): will provide individual value for each filter chain context. - return parent_context_.drainDecision().drainClose(); +bool PerFilterChainFactoryContextImpl::drainClose() const { + return is_draining_.load() || parent_context_.drainDecision().drainClose(); } -Network::DrainDecision& FilterChainFactoryContextImpl::drainDecision() { return *this; } +Network::DrainDecision& PerFilterChainFactoryContextImpl::drainDecision() { return *this; } -// TODO(lambdai): init manager will be provided for each filter chain update. 
-Init::Manager& FilterChainFactoryContextImpl::initManager() { - return parent_context_.initManager(); -} +Init::Manager& PerFilterChainFactoryContextImpl::initManager() { return init_manager_; } -ThreadLocal::SlotAllocator& FilterChainFactoryContextImpl::threadLocal() { +ThreadLocal::SlotAllocator& PerFilterChainFactoryContextImpl::threadLocal() { return parent_context_.threadLocal(); } -const envoy::config::core::v3::Metadata& FilterChainFactoryContextImpl::listenerMetadata() const { +const envoy::config::core::v3::Metadata& +PerFilterChainFactoryContextImpl::listenerMetadata() const { return parent_context_.listenerMetadata(); } -envoy::config::core::v3::TrafficDirection FilterChainFactoryContextImpl::direction() const { +envoy::config::core::v3::TrafficDirection PerFilterChainFactoryContextImpl::direction() const { return parent_context_.direction(); } -ProtobufMessage::ValidationContext& FilterChainFactoryContextImpl::messageValidationContext() { +ProtobufMessage::ValidationContext& PerFilterChainFactoryContextImpl::messageValidationContext() { return parent_context_.messageValidationContext(); } -ProtobufMessage::ValidationVisitor& FilterChainFactoryContextImpl::messageValidationVisitor() { +ProtobufMessage::ValidationVisitor& PerFilterChainFactoryContextImpl::messageValidationVisitor() { return parent_context_.messageValidationVisitor(); } -AccessLog::AccessLogManager& FilterChainFactoryContextImpl::accessLogManager() { +AccessLog::AccessLogManager& PerFilterChainFactoryContextImpl::accessLogManager() { return parent_context_.accessLogManager(); } -Upstream::ClusterManager& FilterChainFactoryContextImpl::clusterManager() { +Upstream::ClusterManager& PerFilterChainFactoryContextImpl::clusterManager() { return parent_context_.clusterManager(); } -Event::Dispatcher& FilterChainFactoryContextImpl::dispatcher() { +Event::Dispatcher& PerFilterChainFactoryContextImpl::dispatcher() { return parent_context_.dispatcher(); } -Grpc::Context& 
FilterChainFactoryContextImpl::grpcContext() { +Grpc::Context& PerFilterChainFactoryContextImpl::grpcContext() { return parent_context_.grpcContext(); } -bool FilterChainFactoryContextImpl::healthCheckFailed() { +bool PerFilterChainFactoryContextImpl::healthCheckFailed() { return parent_context_.healthCheckFailed(); } -Http::Context& FilterChainFactoryContextImpl::httpContext() { +Http::Context& PerFilterChainFactoryContextImpl::httpContext() { return parent_context_.httpContext(); } -const LocalInfo::LocalInfo& FilterChainFactoryContextImpl::localInfo() const { +const LocalInfo::LocalInfo& PerFilterChainFactoryContextImpl::localInfo() const { return parent_context_.localInfo(); } -Envoy::Runtime::RandomGenerator& FilterChainFactoryContextImpl::random() { +Envoy::Runtime::RandomGenerator& PerFilterChainFactoryContextImpl::random() { return parent_context_.random(); } -Envoy::Runtime::Loader& FilterChainFactoryContextImpl::runtime() { +Envoy::Runtime::Loader& PerFilterChainFactoryContextImpl::runtime() { return parent_context_.runtime(); } -Stats::Scope& FilterChainFactoryContextImpl::scope() { return parent_context_.scope(); } +Stats::Scope& PerFilterChainFactoryContextImpl::scope() { return parent_context_.scope(); } -Singleton::Manager& FilterChainFactoryContextImpl::singletonManager() { +Singleton::Manager& PerFilterChainFactoryContextImpl::singletonManager() { return parent_context_.singletonManager(); } -OverloadManager& FilterChainFactoryContextImpl::overloadManager() { +OverloadManager& PerFilterChainFactoryContextImpl::overloadManager() { return parent_context_.overloadManager(); } -Admin& FilterChainFactoryContextImpl::admin() { return parent_context_.admin(); } +Admin& PerFilterChainFactoryContextImpl::admin() { return parent_context_.admin(); } -TimeSource& FilterChainFactoryContextImpl::timeSource() { return api().timeSource(); } +TimeSource& PerFilterChainFactoryContextImpl::timeSource() { return api().timeSource(); } -Api::Api& 
FilterChainFactoryContextImpl::api() { return parent_context_.api(); } +Api::Api& PerFilterChainFactoryContextImpl::api() { return parent_context_.api(); } -ServerLifecycleNotifier& FilterChainFactoryContextImpl::lifecycleNotifier() { +ServerLifecycleNotifier& PerFilterChainFactoryContextImpl::lifecycleNotifier() { return parent_context_.lifecycleNotifier(); } -ProcessContextOptRef FilterChainFactoryContextImpl::processContext() { +ProcessContextOptRef PerFilterChainFactoryContextImpl::processContext() { return parent_context_.processContext(); } Configuration::ServerFactoryContext& -FilterChainFactoryContextImpl::getServerFactoryContext() const { +PerFilterChainFactoryContextImpl::getServerFactoryContext() const { return parent_context_.getServerFactoryContext(); } Configuration::TransportSocketFactoryContext& -FilterChainFactoryContextImpl::getTransportSocketFactoryContext() const { +PerFilterChainFactoryContextImpl::getTransportSocketFactoryContext() const { return parent_context_.getTransportSocketFactoryContext(); } -Stats::Scope& FilterChainFactoryContextImpl::listenerScope() { +Stats::Scope& PerFilterChainFactoryContextImpl::listenerScope() { return parent_context_.listenerScope(); } +FilterChainManagerImpl::FilterChainManagerImpl( + const Network::Address::InstanceConstSharedPtr& address, + Configuration::FactoryContext& factory_context, Init::Manager& init_manager, + const FilterChainManagerImpl& parent_manager) + : address_(address), parent_context_(factory_context), origin_(&parent_manager), + init_manager_(init_manager) {} + bool FilterChainManagerImpl::isWildcardServerName(const std::string& name) { return absl::StartsWith(name, "*."); } @@ -142,8 +147,10 @@ void FilterChainManagerImpl::addFilterChain( absl::Span filter_chain_span, FilterChainFactoryBuilder& filter_chain_factory_builder, FilterChainFactoryContextCreator& context_creator) { + Cleanup cleanup([this]() { origin_ = absl::nullopt; }); std::unordered_set filter_chains; + uint32_t 
new_filter_chain_size = 0; for (const auto& filter_chain : filter_chain_span) { const auto& filter_chain_match = filter_chain->filter_chain_match(); if (!filter_chain_match.address_suffix().empty() || filter_chain_match.has_suffix_len()) { @@ -184,16 +191,27 @@ void FilterChainManagerImpl::addFilterChain( } } + // Reuse created filter chain if possible. + // FilterChainManager maintains the lifetime of FilterChainFactoryContext + // ListenerImpl maintains the dependencies of FilterChainFactoryContext + auto filter_chain_impl = findExistingFilterChain(*filter_chain); + if (filter_chain_impl == nullptr) { + filter_chain_impl = + filter_chain_factory_builder.buildFilterChain(*filter_chain, context_creator); + ++new_filter_chain_size; + } + addFilterChainForDestinationPorts( destination_ports_map_, PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_chain_match, destination_port, 0), destination_ips, filter_chain_match.server_names(), filter_chain_match.transport_protocol(), filter_chain_match.application_protocols(), filter_chain_match.source_type(), source_ips, - filter_chain_match.source_ports(), - std::shared_ptr( - filter_chain_factory_builder.buildFilterChain(*filter_chain, context_creator))); + filter_chain_match.source_ports(), filter_chain_impl); + fc_contexts_[*filter_chain] = filter_chain_impl; } convertIPsToTries(); + ENVOY_LOG(debug, "new fc_contexts has {} filter chains, including {} newly built", + fc_contexts_.size(), new_filter_chain_size); } void FilterChainManagerImpl::addFilterChainForDestinationPorts( @@ -574,12 +592,28 @@ void FilterChainManagerImpl::convertIPsToTries() { } } -Configuration::FilterChainFactoryContext& FilterChainManagerImpl::createFilterChainFactoryContext( +std::shared_ptr FilterChainManagerImpl::findExistingFilterChain( + const envoy::config::listener::v3::FilterChain& filter_chain_message) { + // Origin filter chain manager could be empty if the current is the ancestor. 
+ const auto* origin = getOriginFilterChainManager(); + if (origin == nullptr) { + return nullptr; + } + auto iter = origin->fc_contexts_.find(filter_chain_message); + if (iter != origin->fc_contexts_.end()) { + // copy the context to this filter chain manager. + fc_contexts_.emplace(filter_chain_message, iter->second); + return iter->second; + } + return nullptr; +} + +std::unique_ptr +FilterChainManagerImpl::createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) { - // TODO(lambdai): drain close should be saved in per filter chain context + // TODO(lambdai): add stats UNREFERENCED_PARAMETER(filter_chain); - factory_contexts_.push_back(std::make_unique(parent_context_)); - return *factory_contexts_.back(); + return std::make_unique(parent_context_, init_manager_); } FactoryContextImpl::FactoryContextImpl(Server::Instance& server, diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 6bbe133ddc35..195897ca11a3 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -25,19 +26,24 @@ namespace Server { class FilterChainFactoryBuilder { public: virtual ~FilterChainFactoryBuilder() = default; - virtual std::unique_ptr + /** + * @return Shared filter chain where builder is allowed to determine and reuse duplicated filter + * chain. Throw exception if failed. + */ + virtual std::shared_ptr buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const PURE; }; -// FilterChainFactoryContextImpl is supposed to be used by network filter chain. +// PerFilterChainFactoryContextImpl is supposed to be used by network filter chain. // Its lifetime must cover the created network filter chain. 
// Its lifetime should be covered by the owned listeners so as to support replacing the active // filter chains in the listener. -class FilterChainFactoryContextImpl : public Configuration::FilterChainFactoryContext, - public Network::DrainDecision { +class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactoryContext, + public Network::DrainDecision { public: - explicit FilterChainFactoryContextImpl(Configuration::FactoryContext& parent_context); + explicit PerFilterChainFactoryContextImpl(Configuration::FactoryContext& parent_context, + Init::Manager& init_manager); // DrainDecision bool drainClose() const override; @@ -71,8 +77,40 @@ class FilterChainFactoryContextImpl : public Configuration::FilterChainFactoryCo Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; Stats::Scope& listenerScope() override; + void startDraining() override { is_draining_.store(true); } + private: Configuration::FactoryContext& parent_context_; + Init::Manager& init_manager_; + std::atomic is_draining_{false}; +}; + +class FilterChainImpl : public Network::DrainableFilterChain { +public: + FilterChainImpl(Network::TransportSocketFactoryPtr&& transport_socket_factory, + std::vector&& filters_factory) + : transport_socket_factory_(std::move(transport_socket_factory)), + filters_factory_(std::move(filters_factory)) {} + + // Network::FilterChain + const Network::TransportSocketFactory& transportSocketFactory() const override { + return *transport_socket_factory_; + } + const std::vector& networkFilterFactories() const override { + return filters_factory_; + } + void startDraining() override { factory_context_->startDraining(); } + + void setFilterChainFactoryContext( + Configuration::FilterChainFactoryContextPtr filter_chain_factory_context) { + ASSERT(factory_context_ == nullptr); + factory_context_ = std::move(filter_chain_factory_context); + } + +private: + Configuration::FilterChainFactoryContextPtr factory_context_; + 
const Network::TransportSocketFactoryPtr transport_socket_factory_; + const std::vector filters_factory_; }; /** @@ -122,24 +160,34 @@ class FactoryContextImpl : public Configuration::FactoryContext { }; /** - * Implementation of FilterChainManager. + * Implementation of FilterChainManager. It owns and exchange filter chains. */ class FilterChainManagerImpl : public Network::FilterChainManager, public FilterChainFactoryContextCreator, Logger::Loggable { public: + using FcContextMap = + absl::flat_hash_map, MessageUtil, MessageUtil>; + FilterChainManagerImpl(const Network::Address::InstanceConstSharedPtr& address, + Configuration::FactoryContext& factory_context, + Init::Manager& init_manager) + : address_(address), parent_context_(factory_context), init_manager_(init_manager) {} + FilterChainManagerImpl(const Network::Address::InstanceConstSharedPtr& address, - Configuration::FactoryContext& factory_context) - : address_(address), parent_context_(factory_context) {} + Configuration::FactoryContext& factory_context, + Init::Manager& init_manager, const FilterChainManagerImpl& parent_manager); // FilterChainFactoryContextCreator - Configuration::FilterChainFactoryContext& createFilterChainFactoryContext( + std::unique_ptr createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) override; // Network::FilterChainManager const Network::FilterChain* findFilterChain(const Network::ConnectionSocket& socket) const override; + // Add all filter chains into this manager. During the lifetime of FilterChainManagerImpl this + // should be called at most once. 
void addFilterChain( absl::Span filter_chain_span, FilterChainFactoryBuilder& b, FilterChainFactoryContextCreator& context_creator); @@ -234,34 +282,34 @@ class FilterChainManagerImpl : public Network::FilterChainManager, findFilterChainForSourceIpAndPort(const SourceIPsTrie& source_ips_trie, const Network::ConnectionSocket& socket) const; + const FilterChainManagerImpl* getOriginFilterChainManager() { + ASSERT(origin_.has_value()); + return origin_.value(); + } + // Duplicate the inherent factory context if any. + std::shared_ptr + findExistingFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain_message); + + // Mapping from filter chain message to filter chain. This is used by LDS response handler to + // detect the filter chains in the intersection of existing listener and new listener. + FcContextMap fc_contexts_; + // Mapping of FilterChain's configured destination ports, IPs, server names, transport protocols // and application protocols, using structures defined above. DestinationPortsMap destination_ports_map_; const Network::Address::InstanceConstSharedPtr address_; + // This is the reference to a factory context which all the generations of listener share. Configuration::FactoryContext& parent_context_; - std::list> factory_contexts_; -}; + std::list> factory_contexts_; -class FilterChainImpl : public Network::FilterChain { -public: - FilterChainImpl(Network::TransportSocketFactoryPtr&& transport_socket_factory, - std::vector&& filters_factory) - : transport_socket_factory_(std::move(transport_socket_factory)), - filters_factory_(std::move(filters_factory)) {} + // Reference to the previous generation of filter chain manager to share the filter chains. + // Caution: only during warm up could the optional have value. 
+ absl::optional origin_{nullptr}; - // Network::FilterChain - const Network::TransportSocketFactory& transportSocketFactory() const override { - return *transport_socket_factory_; - } - - const std::vector& networkFilterFactories() const override { - return filters_factory_; - } - -private: - const Network::TransportSocketFactoryPtr transport_socket_factory_; - const std::vector filters_factory_; + // For FilterChainFactoryContextCreator + // init manager owned by the corresponding listener. The reference is valid when building the + // filter chain. + Init::Manager& init_manager_; }; - } // namespace Server } // namespace Envoy diff --git a/source/server/hot_restarting_child.cc b/source/server/hot_restarting_child.cc index 7240fb1e9708..f5eb8296c663 100644 --- a/source/server/hot_restarting_child.cc +++ b/source/server/hot_restarting_child.cc @@ -98,7 +98,7 @@ void HotRestartingChild::mergeParentStats(Stats::Store& stats_store, // Convert the protobuf for serialized dynamic spans into the structure // required by StatMerger. 
Stats::StatMerger::DynamicsMap dynamics; - for (auto iter : stats_proto.dynamics()) { + for (const auto& iter : stats_proto.dynamics()) { Stats::DynamicSpans& spans = dynamics[iter.first]; for (int i = 0; i < iter.second.spans_size(); ++i) { const HotRestartMessage::Reply::Span& span_proto = iter.second.spans(i); diff --git a/source/server/http/BUILD b/source/server/http/BUILD index 0f80988f8786..d477a211afd3 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -13,9 +13,12 @@ envoy_cc_library( srcs = ["admin.cc"], hdrs = ["admin.h"], deps = [ + ":admin_filter_lib", ":config_tracker_lib", + ":utils_lib", "//include/envoy/filesystem:filesystem_interface", "//include/envoy/http:filter_interface", + "//include/envoy/http:request_id_extension_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listen_socket_interface", "//include/envoy/runtime:runtime_interface", @@ -33,7 +36,6 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:empty_string", - "//source/common/common:enum_to_int", "//source/common/common:macros", "//source/common/common:minimal_logger_lib", "//source/common/common:mutex_tracer_lib", @@ -46,6 +48,7 @@ envoy_cc_library( "//source/common/http:default_server_string_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", "//source/common/memory:stats_lib", "//source/common/memory:utils_lib", @@ -68,6 +71,36 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "admin_filter_lib", + srcs = ["admin_filter.cc"], + hdrs = ["admin_filter.h"], + deps = [ + ":utils_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/server:admin_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + 
"//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_cc_library( + name = "utils_lib", + srcs = ["utils.cc"], + hdrs = ["utils.h"], + deps = [ + "//include/envoy/init:manager_interface", + "//source/common/common:enum_to_int", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "config_tracker_lib", srcs = ["config_tracker_impl.cc"], diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 0979542d4b2c..daddbcfc8a5e 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -34,7 +34,6 @@ #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" -#include "common/common/enum_to_int.h" #include "common/common/fmt.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" @@ -56,6 +55,8 @@ #include "common/stats/histogram_impl.h" #include "common/upstream/host_utility.h" +#include "server/http/utils.h" + #include "extensions/access_loggers/file/file_access_log_impl.h" #include "absl/strings/str_join.h" @@ -136,22 +137,6 @@ const std::regex PromRegex("[^a-zA-Z0-9_]"); const uint64_t RecentLookupsCapacity = 100; -void populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map) { - header_map.setStatus(std::to_string(enumToInt(code))); - const auto& headers = Http::Headers::get(); - if (header_map.ContentType() == nullptr) { - // Default to text-plain if unset. - header_map.setReferenceContentType(headers.ContentTypeValues.TextUtf8); - } - // Default to 'no-cache' if unset, but not 'no-store' which may break the back button. - if (header_map.CacheControl() == nullptr) { - header_map.setReferenceCacheControl(headers.CacheControlValues.NoCacheMaxAge0); - } - - // Under no circumstance should browsers sniff content-type. 
- header_map.addReference(headers.XContentTypeOptions, headers.XContentTypeOptionValues.Nosniff); -} - // Helper method to get filter parameter, or report an error for an invalid regex. bool filterParam(Http::Utility::QueryParams params, Buffer::Instance& response, absl::optional& regex) { @@ -259,7 +244,7 @@ void trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Messag std::string any_field_name; for (int i = 0; i < field_mask.paths().size(); ++i) { const std::string& path = field_mask.paths(i); - std::vector frags = absl::StrSplit(path, "."); + std::vector frags = absl::StrSplit(path, '.'); if (frags.empty()) { continue; } @@ -316,59 +301,6 @@ void trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Messag } // namespace -AdminFilter::AdminFilter(AdminImpl& parent) : parent_(parent) {} - -Http::FilterHeadersStatus AdminFilter::decodeHeaders(Http::RequestHeaderMap& headers, - bool end_stream) { - request_headers_ = &headers; - if (end_stream) { - onComplete(); - } - - return Http::FilterHeadersStatus::StopIteration; -} - -Http::FilterDataStatus AdminFilter::decodeData(Buffer::Instance& data, bool end_stream) { - // Currently we generically buffer all admin request data in case a handler wants to use it. - // If we ever support streaming admin requests we may need to revisit this. Note, we must use - // addDecodedData() here since we might need to perform onComplete() processing if end_stream is - // true. 
- callbacks_->addDecodedData(data, false); - - if (end_stream) { - onComplete(); - } - - return Http::FilterDataStatus::StopIterationNoBuffer; -} - -Http::FilterTrailersStatus AdminFilter::decodeTrailers(Http::RequestTrailerMap&) { - onComplete(); - return Http::FilterTrailersStatus::StopIteration; -} - -void AdminFilter::onDestroy() { - for (const auto& callback : on_destroy_callbacks_) { - callback(); - } -} - -void AdminFilter::addOnDestroyCallback(std::function cb) { - on_destroy_callbacks_.push_back(std::move(cb)); -} - -Http::StreamDecoderFilterCallbacks& AdminFilter::getDecoderFilterCallbacks() const { - ASSERT(callbacks_ != nullptr); - return *callbacks_; -} - -const Buffer::Instance* AdminFilter::getRequestBody() const { return callbacks_->decodingBuffer(); } - -const Http::RequestHeaderMap& AdminFilter::getRequestHeaders() const { - ASSERT(request_headers_ != nullptr); - return *request_headers_; -} - bool AdminImpl::changeLogLevel(const Http::Utility::QueryParams& params) { if (params.size() != 1) { return false; @@ -421,19 +353,19 @@ void AdminImpl::addOutlierInfo(const std::string& cluster_name, Buffer::Instance& response) { if (outlier_detector) { response.add(fmt::format( - "{}::outlier::success_rate_average::{}\n", cluster_name, + "{}::outlier::success_rate_average::{:g}\n", cluster_name, outlier_detector->successRateAverage( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))); response.add(fmt::format( - "{}::outlier::success_rate_ejection_threshold::{}\n", cluster_name, + "{}::outlier::success_rate_ejection_threshold::{:g}\n", cluster_name, outlier_detector->successRateEjectionThreshold( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))); response.add(fmt::format( - "{}::outlier::local_origin_success_rate_average::{}\n", cluster_name, + "{}::outlier::local_origin_success_rate_average::{:g}\n", cluster_name, outlier_detector->successRateAverage( 
Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin))); response.add(fmt::format( - "{}::outlier::local_origin_success_rate_ejection_threshold::{}\n", cluster_name, + "{}::outlier::local_origin_success_rate_ejection_threshold::{:g}\n", cluster_name, outlier_detector->successRateEjectionThreshold( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin))); } @@ -1360,23 +1292,6 @@ Http::Code AdminImpl::handlerReopenLogs(absl::string_view, Http::ResponseHeaderM ConfigTracker& AdminImpl::getConfigTracker() { return config_tracker_; } -void AdminFilter::onComplete() { - absl::string_view path = request_headers_->Path()->value().getStringView(); - ENVOY_STREAM_LOG(debug, "request complete: path: {}", *callbacks_, path); - - Buffer::OwnedImpl response; - Http::ResponseHeaderMapPtr header_map{new Http::ResponseHeaderMapImpl}; - RELEASE_ASSERT(request_headers_, ""); - Http::Code code = parent_.runCallback(path, *header_map, response, *this); - populateFallbackResponseHeaders(code, *header_map); - callbacks_->encodeHeaders(std::move(header_map), - end_stream_on_complete_ && response.length() == 0); - - if (response.length() > 0) { - callbacks_->encodeData(response, end_stream_on_complete_); - } -} - AdminImpl::NullRouteConfigProvider::NullRouteConfigProvider(TimeSource& time_source) : config_(new Router::NullConfigImpl()), time_source_(time_source) {} @@ -1405,7 +1320,9 @@ void AdminImpl::startHttpListener(const std::string& access_log_path, } AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) - : server_(server), profile_path_(profile_path), + : server_(server), + request_id_extension_(Http::RequestIDExtensionFactory::defaultInstance(server_.random())), + profile_path_(profile_path), stats_(Http::ConnectionManagerImpl::generateStats("http.admin.", server_.stats())), tracing_stats_( Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), @@ -1476,7 +1393,7 @@ 
Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection connection, data, callbacks, server_.stats(), Http::Http1Settings(), ::Envoy::Http2::Utility::initializeAndValidateOptions( envoy::config::core::v3::Http2ProtocolOptions()), - maxRequestHeadersKb(), maxRequestHeadersCount()); + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } bool AdminImpl::createNetworkFilterChain(Network::Connection& connection, @@ -1490,12 +1407,13 @@ bool AdminImpl::createNetworkFilterChain(Network::Connection& connection, } void AdminImpl::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) { - callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{new AdminFilter(*this)}); + callbacks.addStreamFilter(std::make_shared(createCallbackFunction())); } Http::Code AdminImpl::runCallback(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream& admin_stream) { + Http::Code code = Http::Code::OK; bool found_handler = false; @@ -1644,14 +1562,15 @@ bool AdminImpl::removeHandler(const std::string& prefix) { Http::Code AdminImpl::request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) { - AdminFilter filter(*this); + AdminFilter filter(createCallbackFunction()); + Http::RequestHeaderMapImpl request_headers; request_headers.setMethod(method); filter.decodeHeaders(request_headers, false); Buffer::OwnedImpl response; Http::Code code = runCallback(path_and_query, response_headers, response, filter); - populateFallbackResponseHeaders(code, response_headers); + Utility::populateFallbackResponseHeaders(code, response_headers); body = response.toString(); return code; } @@ -1668,19 +1587,5 @@ void AdminImpl::addListenerToHandler(Network::ConnectionHandler* handler) { } } -envoy::admin::v3::ServerInfo::State Utility::serverState(Init::Manager::State state, - bool health_check_failed) { - 
switch (state) { - case Init::Manager::State::Uninitialized: - return envoy::admin::v3::ServerInfo::PRE_INITIALIZING; - case Init::Manager::State::Initializing: - return envoy::admin::v3::ServerInfo::INITIALIZING; - case Init::Manager::State::Initialized: - return health_check_failed ? envoy::admin::v3::ServerInfo::DRAINING - : envoy::admin::v3::ServerInfo::LIVE; - } - NOT_REACHED_GCOVR_EXCL_LINE; -} - } // namespace Server } // namespace Envoy diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 4405a1aeabad..82332658de56 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -13,6 +14,7 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/http/filter.h" +#include "envoy/http/request_id_extension.h" #include "envoy/network/filter.h" #include "envoy/network/listen_socket.h" #include "envoy/runtime/runtime.h" @@ -29,24 +31,23 @@ #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/default_server_string.h" +#include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" #include "common/network/connection_balancer_impl.h" #include "common/network/raw_buffer_socket.h" #include "common/router/scoped_config_impl.h" #include "common/stats/isolated_store_impl.h" +#include "server/http/admin_filter.h" #include "server/http/config_tracker_impl.h" +#include "extensions/filters/http/common/pass_through_filter.h" + #include "absl/strings/string_view.h" namespace Envoy { namespace Server { -namespace Utility { -envoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state, - bool health_check_failed); -} // namespace Utility - class AdminInternalAddressConfig : public Http::InternalAddressConfig { bool isInternalAddress(const Network::Address::Instance&) const 
override { return false; } }; @@ -105,6 +106,7 @@ class AdminImpl : public Admin, } // Http::ConnectionManagerConfig + Http::RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } const std::list& accessLogs() override { return access_logs_; } Http::ServerConnectionPtr createCodec(Network::Connection& connection, const Buffer::Instance& data, @@ -160,12 +162,23 @@ class AdminImpl : public Admin, const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return true; } bool shouldMergeSlashes() const override { return true; } + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return envoy::config::core::v3::HttpProtocolOptions::ALLOW; + } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void closeSocket(); void addListenerToHandler(Network::ConnectionHandler* handler) override; Server::Instance& server() { return server_; } + AdminFilter::AdminServerCallbackFunction createCallbackFunction() { + return [this](absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, + Buffer::OwnedImpl& response, AdminFilter& filter) -> Http::Code { + return runCallback(path_and_query, response_headers, response, filter); + }; + } + private: /** * Individual admin handler including prefix, help text, and callback. 
@@ -405,9 +418,7 @@ class AdminImpl : public Admin, bool bindToPort() override { return true; } bool handOffRestoredDestinationConnections() const override { return false; } uint32_t perConnectionBufferLimitBytes() const override { return 0; } - std::chrono::milliseconds listenerFiltersTimeout() const override { - return std::chrono::milliseconds(); - } + std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; } bool continueOnListenerFiltersTimeout() const override { return false; } Stats::Scope& listenerScope() override { return *scope_; } uint64_t listenerTag() const override { return 0; } @@ -419,12 +430,18 @@ class AdminImpl : public Admin, return envoy::config::core::v3::UNSPECIFIED; } Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } + const std::vector& accessLogs() const override { + return empty_access_logs_; + } AdminImpl& parent_; const std::string name_; Stats::ScopePtr scope_; Http::ConnectionManagerListenerStats stats_; Network::NopConnectionBalancerImpl connection_balancer_; + + private: + const std::vector empty_access_logs_; }; using AdminListenerPtr = std::unique_ptr; @@ -449,6 +466,7 @@ class AdminImpl : public Admin, }; Server::Instance& server_; + Http::RequestIDExtensionSharedPtr request_id_extension_; std::list access_logs_; const std::string profile_path_; Http::ConnectionManagerStats stats_; @@ -476,50 +494,6 @@ class AdminImpl : public Admin, const AdminInternalAddressConfig internal_address_config_; }; -/** - * A terminal HTTP filter that implements server admin functionality. 
- */ -class AdminFilter : public Http::StreamDecoderFilter, - public AdminStream, - Logger::Loggable { -public: - AdminFilter(AdminImpl& parent); - - // Http::StreamFilterBase - void onDestroy() override; - - // Http::StreamDecoderFilter - Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, - bool end_stream) override; - Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override; - Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override; - void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override { - callbacks_ = &callbacks; - } - - // AdminStream - void setEndStreamOnComplete(bool end_stream) override { end_stream_on_complete_ = end_stream; } - void addOnDestroyCallback(std::function cb) override; - Http::StreamDecoderFilterCallbacks& getDecoderFilterCallbacks() const override; - const Buffer::Instance* getRequestBody() const override; - const Http::RequestHeaderMap& getRequestHeaders() const override; - -private: - /** - * Called when an admin request has been completely received. - */ - void onComplete(); - - AdminImpl& parent_; - // Handlers relying on the reference should use addOnDestroyCallback() - // to add a callback that will notify them when the reference is no - // longer valid. - Http::StreamDecoderFilterCallbacks* callbacks_{}; - Http::RequestHeaderMap* request_headers_{}; - std::list> on_destroy_callbacks_; - bool end_stream_on_complete_ = true; -}; - /** * Formatter for metric/labels exported to Prometheus. 
* diff --git a/source/server/http/admin_filter.cc b/source/server/http/admin_filter.cc new file mode 100644 index 000000000000..7f9cf3930974 --- /dev/null +++ b/source/server/http/admin_filter.cc @@ -0,0 +1,82 @@ +#include "server/http/admin_filter.h" + +#include "server/http/utils.h" + +namespace Envoy { +namespace Server { + +AdminFilter::AdminFilter(AdminServerCallbackFunction admin_server_callback_func) + : admin_server_callback_func_(admin_server_callback_func) {} + +Http::FilterHeadersStatus AdminFilter::decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) { + request_headers_ = &headers; + if (end_stream) { + onComplete(); + } + + return Http::FilterHeadersStatus::StopIteration; +} + +Http::FilterDataStatus AdminFilter::decodeData(Buffer::Instance& data, bool end_stream) { + // Currently we generically buffer all admin request data in case a handler wants to use it. + // If we ever support streaming admin requests we may need to revisit this. Note, we must use + // addDecodedData() here since we might need to perform onComplete() processing if end_stream is + // true. 
+ decoder_callbacks_->addDecodedData(data, false); + + if (end_stream) { + onComplete(); + } + + return Http::FilterDataStatus::StopIterationNoBuffer; +} + +Http::FilterTrailersStatus AdminFilter::decodeTrailers(Http::RequestTrailerMap&) { + onComplete(); + return Http::FilterTrailersStatus::StopIteration; +} + +void AdminFilter::onDestroy() { + for (const auto& callback : on_destroy_callbacks_) { + callback(); + } +} + +void AdminFilter::addOnDestroyCallback(std::function cb) { + on_destroy_callbacks_.push_back(std::move(cb)); +} + +Http::StreamDecoderFilterCallbacks& AdminFilter::getDecoderFilterCallbacks() const { + ASSERT(decoder_callbacks_ != nullptr); + return *decoder_callbacks_; +} + +const Buffer::Instance* AdminFilter::getRequestBody() const { + return decoder_callbacks_->decodingBuffer(); +} + +const Http::RequestHeaderMap& AdminFilter::getRequestHeaders() const { + ASSERT(request_headers_ != nullptr); + return *request_headers_; +} + +void AdminFilter::onComplete() { + absl::string_view path = request_headers_->Path()->value().getStringView(); + ENVOY_STREAM_LOG(debug, "request complete: path: {}", *decoder_callbacks_, path); + + Buffer::OwnedImpl response; + Http::ResponseHeaderMapPtr header_map{new Http::ResponseHeaderMapImpl}; + RELEASE_ASSERT(request_headers_, ""); + Http::Code code = admin_server_callback_func_(path, *header_map, response, *this); + Utility::populateFallbackResponseHeaders(code, *header_map); + decoder_callbacks_->encodeHeaders(std::move(header_map), + end_stream_on_complete_ && response.length() == 0); + + if (response.length() > 0) { + decoder_callbacks_->encodeData(response, end_stream_on_complete_); + } +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/admin_filter.h b/source/server/http/admin_filter.h new file mode 100644 index 000000000000..000279f8ff79 --- /dev/null +++ b/source/server/http/admin_filter.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include + +#include "envoy/http/filter.h" 
+#include "envoy/server/admin.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +/** + * A terminal HTTP filter that implements server admin functionality. + */ +class AdminFilter : public Http::PassThroughFilter, + public AdminStream, + Logger::Loggable { +public: + using AdminServerCallbackFunction = std::function; + + AdminFilter(AdminServerCallbackFunction admin_server_run_callback_func); + + // Http::StreamFilterBase + // Handlers relying on the reference should use addOnDestroyCallback() + // to add a callback that will notify them when the reference is no + // longer valid. + void onDestroy() override; + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) override; + Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override; + Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& trailers) override; + + // AdminStream + void setEndStreamOnComplete(bool end_stream) override { end_stream_on_complete_ = end_stream; } + void addOnDestroyCallback(std::function cb) override; + Http::StreamDecoderFilterCallbacks& getDecoderFilterCallbacks() const override; + const Buffer::Instance* getRequestBody() const override; + const Http::RequestHeaderMap& getRequestHeaders() const override; + Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { + return encoder_callbacks_->http1StreamEncoderOptions(); + } + +private: + /** + * Called when an admin request has been completely received. 
+ */ + void onComplete(); + AdminServerCallbackFunction admin_server_callback_func_; + Http::RequestHeaderMap* request_headers_{}; + std::list> on_destroy_callbacks_; + bool end_stream_on_complete_ = true; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/utils.cc b/source/server/http/utils.cc new file mode 100644 index 000000000000..4a63ea8cc56b --- /dev/null +++ b/source/server/http/utils.cc @@ -0,0 +1,42 @@ +#include "server/http/utils.h" + +#include "common/common/enum_to_int.h" +#include "common/http/headers.h" + +namespace Envoy { +namespace Server { +namespace Utility { + +envoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state, + bool health_check_failed) { + switch (state) { + case Init::Manager::State::Uninitialized: + return envoy::admin::v3::ServerInfo::PRE_INITIALIZING; + case Init::Manager::State::Initializing: + return envoy::admin::v3::ServerInfo::INITIALIZING; + case Init::Manager::State::Initialized: + return health_check_failed ? envoy::admin::v3::ServerInfo::DRAINING + : envoy::admin::v3::ServerInfo::LIVE; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map) { + header_map.setStatus(std::to_string(enumToInt(code))); + const auto& headers = Http::Headers::get(); + if (header_map.ContentType() == nullptr) { + // Default to text-plain if unset. + header_map.setReferenceContentType(headers.ContentTypeValues.TextUtf8); + } + // Default to 'no-cache' if unset, but not 'no-store' which may break the back button. + if (header_map.CacheControl() == nullptr) { + header_map.setReferenceCacheControl(headers.CacheControlValues.NoCacheMaxAge0); + } + + // Under no circumstance should browsers sniff content-type. 
+ header_map.addReference(headers.XContentTypeOptions, headers.XContentTypeOptionValues.Nosniff); +} + +} // namespace Utility +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/utils.h b/source/server/http/utils.h new file mode 100644 index 000000000000..14a1d59dde24 --- /dev/null +++ b/source/server/http/utils.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/admin/v3/server_info.pb.h" +#include "envoy/init/manager.h" + +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" + +namespace Envoy { +namespace Server { +namespace Utility { + +envoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state, + bool health_check_failed); + +void populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map); + +} // namespace Utility +} // namespace Server +} // namespace Envoy diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index b799140835be..5f4cecafc6ce 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -26,10 +26,12 @@ LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, Upstream::ClusterManager& cm, Init::Manager& init_manager, Stats::Scope& scope, ListenerManager& lm, ProtobufMessage::ValidationVisitor& validation_visitor) - : listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), + : Envoy::Config::SubscriptionBase( + lds_config.resource_api_version()), + listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), init_target_("LDS", [this]() { subscription_->start({}); }), validation_visitor_(validation_visitor) { - const auto resource_name = getResourceName(lds_config.resource_api_version()); + const auto resource_name = getResourceName(); subscription_ = cm.subscriptionFactory().subscriptionFromConfigSource( lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this); init_manager.add(init_target_); @@ -135,4 +137,4 @@ void 
LdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason r } } // namespace Server -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 5a1f64d916fd..00a415563668 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -3,7 +3,6 @@ #include #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/discovery_service_base.h" #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" @@ -13,6 +12,7 @@ #include "envoy/stats/scope.h" #include "common/common/logger.h" +#include "common/config/subscription_base.h" #include "common/init/target_impl.h" namespace Envoy { diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index f2f016286eb7..149d8d1fba21 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -8,6 +8,7 @@ #include "envoy/server/transport_socket_config.h" #include "envoy/stats/scope.h" +#include "common/access_log/access_log_impl.h" #include "common/common/assert.h" #include "common/config/utility.h" #include "common/network/connection_balancer_impl.h" @@ -122,15 +123,83 @@ Network::SocketSharedPtr ListenSocketFactoryImpl::getListenSocket() { return createListenSocketAndApplyOptions(); } +ListenerFactoryContextBaseImpl::ListenerFactoryContextBaseImpl( + Envoy::Server::Instance& server, ProtobufMessage::ValidationVisitor& validation_visitor, + const envoy::config::listener::v3::Listener& config, DrainManagerPtr drain_manager) + : server_(server), metadata_(config.metadata()), direction_(config.traffic_direction()), + global_scope_(server.stats().createScope("")), + listener_scope_(server_.stats().createScope(fmt::format( + "listener.{}.", Network::Address::resolveProtoAddress(config.address())->asString()))), + validation_visitor_(validation_visitor), drain_manager_(std::move(drain_manager)) {} + 
+AccessLog::AccessLogManager& ListenerFactoryContextBaseImpl::accessLogManager() { + return server_.accessLogManager(); +} +Upstream::ClusterManager& ListenerFactoryContextBaseImpl::clusterManager() { + return server_.clusterManager(); +} +Event::Dispatcher& ListenerFactoryContextBaseImpl::dispatcher() { return server_.dispatcher(); } +Grpc::Context& ListenerFactoryContextBaseImpl::grpcContext() { return server_.grpcContext(); } +bool ListenerFactoryContextBaseImpl::healthCheckFailed() { return server_.healthCheckFailed(); } +Http::Context& ListenerFactoryContextBaseImpl::httpContext() { return server_.httpContext(); } +const LocalInfo::LocalInfo& ListenerFactoryContextBaseImpl::localInfo() const { + return server_.localInfo(); +} +Envoy::Runtime::RandomGenerator& ListenerFactoryContextBaseImpl::random() { + return server_.random(); +} +Envoy::Runtime::Loader& ListenerFactoryContextBaseImpl::runtime() { return server_.runtime(); } +Stats::Scope& ListenerFactoryContextBaseImpl::scope() { return *global_scope_; } +Singleton::Manager& ListenerFactoryContextBaseImpl::singletonManager() { + return server_.singletonManager(); +} +OverloadManager& ListenerFactoryContextBaseImpl::overloadManager() { + return server_.overloadManager(); +} +ThreadLocal::Instance& ListenerFactoryContextBaseImpl::threadLocal() { + return server_.threadLocal(); +} +Admin& ListenerFactoryContextBaseImpl::admin() { return server_.admin(); } +const envoy::config::core::v3::Metadata& ListenerFactoryContextBaseImpl::listenerMetadata() const { + return metadata_; +}; +envoy::config::core::v3::TrafficDirection ListenerFactoryContextBaseImpl::direction() const { + return direction_; +}; +TimeSource& ListenerFactoryContextBaseImpl::timeSource() { return api().timeSource(); } +ProtobufMessage::ValidationContext& ListenerFactoryContextBaseImpl::messageValidationContext() { + return server_.messageValidationContext(); +} +ProtobufMessage::ValidationVisitor& 
ListenerFactoryContextBaseImpl::messageValidationVisitor() { + return validation_visitor_; +} +Api::Api& ListenerFactoryContextBaseImpl::api() { return server_.api(); } +ServerLifecycleNotifier& ListenerFactoryContextBaseImpl::lifecycleNotifier() { + return server_.lifecycleNotifier(); +} +ProcessContextOptRef ListenerFactoryContextBaseImpl::processContext() { + return server_.processContext(); +} +Configuration::ServerFactoryContext& +ListenerFactoryContextBaseImpl::getServerFactoryContext() const { + return server_.serverFactoryContext(); +} +Configuration::TransportSocketFactoryContext& +ListenerFactoryContextBaseImpl::getTransportSocketFactoryContext() const { + return server_.transportSocketFactoryContext(); +} +Stats::Scope& ListenerFactoryContextBaseImpl::listenerScope() { return *listener_scope_; } +Network::DrainDecision& ListenerFactoryContextBaseImpl::drainDecision() { return *this; } +Server::DrainManager& ListenerFactoryContextBaseImpl::drainManager() { return *drain_manager_; } + +// Must be overridden +Init::Manager& ListenerFactoryContextBaseImpl::initManager() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, uint32_t concurrency) : parent_(parent), address_(Network::Address::resolveProtoAddress(config.address())), - filter_chain_manager_(address_, *this), - global_scope_(parent_.server_.stats().createScope("")), - listener_scope_( - parent_.server_.stats().createScope(fmt::format("listener.{}.", address_->asString()))), bind_to_port_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.deprecated_v1(), bind_to_port, true)), hand_off_restored_destination_connections_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)), @@ -141,24 +210,28 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& 
config, validation_visitor_( added_via_api_ ? parent_.server_.messageValidationContext().dynamicValidationVisitor() : parent_.server_.messageValidationContext().staticValidationVisitor()), - local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), - [this] { - if (workers_started_) { - parent_.onListenerWarmed(*this); - } else { - // Notify Server that this listener is - // ready. - listener_init_target_.ready(); - } - }), listener_init_target_(fmt::format("Listener-init-target {}", name), - [this]() { dynamic_init_manager_.initialize(local_init_watcher_); }), - dynamic_init_manager_(fmt::format("Listener-local-init-manager {}", name)), - local_drain_manager_(parent.factory_.createDrainManager(config.drain_type())), + [this]() { dynamic_init_manager_->initialize(local_init_watcher_); }), + dynamic_init_manager_(std::make_unique( + fmt::format("Listener-local-init-manager {} {}", name, hash))), config_(config), version_info_(version_info), listener_filters_timeout_( PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), - continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()) { + continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + listener_factory_context_(std::make_shared( + parent.server_, validation_visitor_, config, this, *this, + parent.factory_.createDrainManager(config.drain_type()))), + filter_chain_manager_(address_, listener_factory_context_->parentFactoryContext(), + initManager()), + local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), [this] { + if (workers_started_) { + parent_.onListenerWarmed(*this); + } else { + // Notify Server that this listener is + // ready. 
+ listener_init_target_.ready(); + } + }) { Network::Address::SocketType socket_type = Network::Utility::protobufAddressSocketType(config.address()); if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, transparent, false)) { @@ -203,18 +276,24 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, fmt::format("error adding listener '{}': Only 1 UDP filter per listener supported", address_->asString())); } - udp_listener_filter_factories_ = - parent_.factory_.createUdpListenerFilterFactoryList(config.listener_filters(), *this); + udp_listener_filter_factories_ = parent_.factory_.createUdpListenerFilterFactoryList( + config.listener_filters(), *listener_factory_context_); break; case Network::Address::SocketType::Stream: - listener_filter_factories_ = - parent_.factory_.createListenerFilterFactoryList(config.listener_filters(), *this); + listener_filter_factories_ = parent_.factory_.createListenerFilterFactoryList( + config.listener_filters(), *listener_factory_context_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; } } + for (const auto& access_log : config.access_log()) { + AccessLog::InstanceSharedPtr current_access_log = + AccessLog::AccessLogFactory::fromProto(access_log, *listener_factory_context_); + access_logs_.push_back(current_access_log); + } + if (config.filter_chains().empty() && (socket_type == Network::Address::SocketType::Stream || !udp_listener_factory_->isTransportConnectionless())) { // If we got here, this is a tcp listener or connection-oriented udp listener, so ensure there @@ -234,11 +313,11 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, } Server::Configuration::TransportSocketFactoryContextImpl transport_factory_context( - parent_.server_.admin(), parent_.server_.sslContextManager(), *listener_scope_, + parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(), parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), 
parent_.server_.random(), parent_.server_.stats(), parent_.server_.singletonManager(), parent_.server_.threadLocal(), validation_visitor_, parent_.server_.api()); - transport_factory_context.setInitManager(dynamic_init_manager_); + transport_factory_context.setInitManager(*dynamic_init_manager_); // The init manager is a little messy. Will refactor when filter chain manager could accept // network filter chain update. // TODO(lambdai): create builder from filter_chain_manager to obtain the init manager @@ -268,8 +347,10 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().OriginalDst); - listener_filter_factories_.push_back( - factory.createFilterFactoryFromProto(Envoy::ProtobufWkt::Empty(), *this)); + + listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto( + Envoy::ProtobufWkt::Empty(), + /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } // Add proxy protocol listener filter if 'use_proxy_proto' flag is set. // TODO(jrajahalme): This is the last listener filter on purpose. When filter chain matching @@ -279,8 +360,9 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().ProxyProtocol); - listener_filter_factories_.push_back( - factory.createFilterFactoryFromProto(Envoy::ProtobufWkt::Empty(), *this)); + listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto( + Envoy::ProtobufWkt::Empty(), + /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } // TODO(zuercher) remove the deprecated TLS inspector name when the deprecated names are removed. 
@@ -312,8 +394,9 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector); - listener_filter_factories_.push_back( - factory.createFilterFactoryFromProto(Envoy::ProtobufWkt::Empty(), *this)); + listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto( + Envoy::ProtobufWkt::Empty(), + /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } if (!workers_started_) { @@ -325,53 +408,84 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, } } -AccessLog::AccessLogManager& ListenerImpl::accessLogManager() { - return parent_.server_.accessLogManager(); -} -Upstream::ClusterManager& ListenerImpl::clusterManager() { - return parent_.server_.clusterManager(); -} -Event::Dispatcher& ListenerImpl::dispatcher() { return parent_.server_.dispatcher(); } -Network::DrainDecision& ListenerImpl::drainDecision() { return *this; } -Grpc::Context& ListenerImpl::grpcContext() { return parent_.server_.grpcContext(); } -bool ListenerImpl::healthCheckFailed() { return parent_.server_.healthCheckFailed(); } -Http::Context& ListenerImpl::httpContext() { return parent_.server_.httpContext(); } - -const LocalInfo::LocalInfo& ListenerImpl::localInfo() const { return parent_.server_.localInfo(); } -Envoy::Runtime::RandomGenerator& ListenerImpl::random() { return parent_.server_.random(); } -Envoy::Runtime::Loader& ListenerImpl::runtime() { return parent_.server_.runtime(); } -Stats::Scope& ListenerImpl::scope() { return *global_scope_; } -Singleton::Manager& ListenerImpl::singletonManager() { return parent_.server_.singletonManager(); } -OverloadManager& ListenerImpl::overloadManager() { return parent_.server_.overloadManager(); } -ThreadLocal::Instance& ListenerImpl::threadLocal() { return parent_.server_.threadLocal(); } -Admin& ListenerImpl::admin() { return 
parent_.server_.admin(); } -const envoy::config::core::v3::Metadata& ListenerImpl::listenerMetadata() const { - return config_.metadata(); +AccessLog::AccessLogManager& PerListenerFactoryContextImpl::accessLogManager() { + return listener_factory_context_base_->accessLogManager(); +} +Upstream::ClusterManager& PerListenerFactoryContextImpl::clusterManager() { + return listener_factory_context_base_->clusterManager(); +} +Event::Dispatcher& PerListenerFactoryContextImpl::dispatcher() { + return listener_factory_context_base_->dispatcher(); +} +Network::DrainDecision& PerListenerFactoryContextImpl::drainDecision() { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +} +Grpc::Context& PerListenerFactoryContextImpl::grpcContext() { + return listener_factory_context_base_->grpcContext(); +} +bool PerListenerFactoryContextImpl::healthCheckFailed() { + return listener_factory_context_base_->healthCheckFailed(); +} +Http::Context& PerListenerFactoryContextImpl::httpContext() { + return listener_factory_context_base_->httpContext(); +} +const LocalInfo::LocalInfo& PerListenerFactoryContextImpl::localInfo() const { + return listener_factory_context_base_->localInfo(); +} +Envoy::Runtime::RandomGenerator& PerListenerFactoryContextImpl::random() { + return listener_factory_context_base_->random(); +} +Envoy::Runtime::Loader& PerListenerFactoryContextImpl::runtime() { + return listener_factory_context_base_->runtime(); +} +Stats::Scope& PerListenerFactoryContextImpl::scope() { + return listener_factory_context_base_->scope(); +} +Singleton::Manager& PerListenerFactoryContextImpl::singletonManager() { + return listener_factory_context_base_->singletonManager(); +} +OverloadManager& PerListenerFactoryContextImpl::overloadManager() { + return listener_factory_context_base_->overloadManager(); +} +ThreadLocal::Instance& PerListenerFactoryContextImpl::threadLocal() { + return listener_factory_context_base_->threadLocal(); +} +Admin& PerListenerFactoryContextImpl::admin() { return 
listener_factory_context_base_->admin(); } +const envoy::config::core::v3::Metadata& PerListenerFactoryContextImpl::listenerMetadata() const { + return listener_factory_context_base_->listenerMetadata(); }; -envoy::config::core::v3::TrafficDirection ListenerImpl::direction() const { - return config_.traffic_direction(); +envoy::config::core::v3::TrafficDirection PerListenerFactoryContextImpl::direction() const { + return listener_factory_context_base_->direction(); }; -TimeSource& ListenerImpl::timeSource() { return api().timeSource(); } - -const Network::ListenerConfig& ListenerImpl::listenerConfig() const { return *this; } -ProtobufMessage::ValidationContext& ListenerImpl::messageValidationContext() { +TimeSource& PerListenerFactoryContextImpl::timeSource() { return api().timeSource(); } +const Network::ListenerConfig& PerListenerFactoryContextImpl::listenerConfig() const { + return *listener_config_; +} +ProtobufMessage::ValidationContext& PerListenerFactoryContextImpl::messageValidationContext() { return getServerFactoryContext().messageValidationContext(); } -ProtobufMessage::ValidationVisitor& ListenerImpl::messageValidationVisitor() { - return validation_visitor_; +ProtobufMessage::ValidationVisitor& PerListenerFactoryContextImpl::messageValidationVisitor() { + return listener_factory_context_base_->messageValidationVisitor(); +} +Api::Api& PerListenerFactoryContextImpl::api() { return listener_factory_context_base_->api(); } +ServerLifecycleNotifier& PerListenerFactoryContextImpl::lifecycleNotifier() { + return listener_factory_context_base_->lifecycleNotifier(); } -Api::Api& ListenerImpl::api() { return parent_.server_.api(); } -ServerLifecycleNotifier& ListenerImpl::lifecycleNotifier() { - return parent_.server_.lifecycleNotifier(); +ProcessContextOptRef PerListenerFactoryContextImpl::processContext() { + return listener_factory_context_base_->processContext(); } -ProcessContextOptRef ListenerImpl::processContext() { return 
parent_.server_.processContext(); } -Configuration::ServerFactoryContext& ListenerImpl::getServerFactoryContext() const { - return parent_.server_.serverFactoryContext(); +Configuration::ServerFactoryContext& +PerListenerFactoryContextImpl::getServerFactoryContext() const { + return listener_factory_context_base_->getServerFactoryContext(); } Configuration::TransportSocketFactoryContext& -ListenerImpl::getTransportSocketFactoryContext() const { - return parent_.server_.transportSocketFactoryContext(); +PerListenerFactoryContextImpl::getTransportSocketFactoryContext() const { + return listener_factory_context_base_->getTransportSocketFactoryContext(); +} +Stats::Scope& PerListenerFactoryContextImpl::listenerScope() { + return listener_factory_context_base_->listenerScope(); } +Init::Manager& PerListenerFactoryContextImpl::initManager() { return listener_impl_.initManager(); } bool ListenerImpl::createNetworkFilterChain( Network::Connection& connection, @@ -389,20 +503,13 @@ void ListenerImpl::createUdpListenerFilterChain(Network::UdpListenerFilterManage udp_listener_filter_factories_); } -bool ListenerImpl::drainClose() const { - // When a listener is draining, the "drain close" decision is the union of the per-listener drain - // manager and the server wide drain manager. This allows individual listeners to be drained and - // removed independently of a server-wide drain event (e.g., /healthcheck/fail or hot restart). 
- return local_drain_manager_->drainClose() || parent_.server_.drainManager().drainClose(); -} - void ListenerImpl::debugLog(const std::string& message) { UNREFERENCED_PARAMETER(message); ENVOY_LOG(debug, "{}: name={}, hash={}, address={}", message, name_, hash_, address_->asString()); } void ListenerImpl::initialize() { - last_updated_ = timeSource().systemTime(); + last_updated_ = listener_factory_context_->timeSource().systemTime(); // If workers have already started, we shift from using the global init manager to using a local // per listener init manager. See ~ListenerImpl() for why we gate the onListenerWarmed() call // by resetting the watcher. @@ -410,7 +517,7 @@ void ListenerImpl::initialize() { ENVOY_LOG_MISC(debug, "Initialize listener {} local-init-manager.", name_); // If workers_started_ is true, dynamic_init_manager_ should be initialized by listener manager // directly. - dynamic_init_manager_.initialize(local_init_watcher_); + dynamic_init_manager_->initialize(local_init_watcher_); } } @@ -422,7 +529,7 @@ ListenerImpl::~ListenerImpl() { } } -Init::Manager& ListenerImpl::initManager() { return dynamic_init_manager_; } +Init::Manager& ListenerImpl::initManager() { return *dynamic_init_manager_; } void ListenerImpl::setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory) { ASSERT(!socket_factory_); diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index ec4e21a6ed59..53e27b02024b 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -2,11 +2,14 @@ #include +#include "envoy/access_log/access_log.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" #include "envoy/server/drain_manager.h" #include "envoy/server/filter_config.h" +#include "envoy/server/instance.h" #include "envoy/server/listener_manager.h" #include "envoy/stats/scope.h" @@ -73,14 +76,130 @@ class 
ListenSocketFactoryImpl : public Network::ListenSocketFactory, // TODO(mattklein123): Consider getting rid of pre-worker start and post-worker start code by // initializing all listeners after workers are started. +/** + * The common functionality shared by PerListenerFilterFactoryContexts and + * PerFilterChainFactoryFactoryContexts. + */ +class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContext, + public Network::DrainDecision { +public: + ListenerFactoryContextBaseImpl(Envoy::Server::Instance& server, + ProtobufMessage::ValidationVisitor& validation_visitor, + const envoy::config::listener::v3::Listener& config, + Server::DrainManagerPtr drain_manager); + AccessLog::AccessLogManager& accessLogManager() override; + Upstream::ClusterManager& clusterManager() override; + Event::Dispatcher& dispatcher() override; + Network::DrainDecision& drainDecision() override; + Grpc::Context& grpcContext() override; + bool healthCheckFailed() override; + Http::Context& httpContext() override; + Init::Manager& initManager() override; + const LocalInfo::LocalInfo& localInfo() const override; + Envoy::Runtime::RandomGenerator& random() override; + Envoy::Runtime::Loader& runtime() override; + Stats::Scope& scope() override; + Singleton::Manager& singletonManager() override; + OverloadManager& overloadManager() override; + ThreadLocal::Instance& threadLocal() override; + Admin& admin() override; + const envoy::config::core::v3::Metadata& listenerMetadata() const override; + envoy::config::core::v3::TrafficDirection direction() const override; + TimeSource& timeSource() override; + ProtobufMessage::ValidationContext& messageValidationContext() override; + ProtobufMessage::ValidationVisitor& messageValidationVisitor() override; + Api::Api& api() override; + ServerLifecycleNotifier& lifecycleNotifier() override; + ProcessContextOptRef processContext() override; + Configuration::ServerFactoryContext& getServerFactoryContext() const override; + 
Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; + Stats::Scope& listenerScope() override; + + // DrainDecision + bool drainClose() const override { + return drain_manager_->drainClose() || server_.drainManager().drainClose(); + } + Server::DrainManager& drainManager(); + +private: + Envoy::Server::Instance& server_; + const envoy::config::core::v3::Metadata metadata_; + envoy::config::core::v3::TrafficDirection direction_; + Stats::ScopePtr global_scope_; + Stats::ScopePtr listener_scope_; // Stats with listener named scope. + ProtobufMessage::ValidationVisitor& validation_visitor_; + const Server::DrainManagerPtr drain_manager_; +}; + +class ListenerImpl; + +// TODO(lambdai): Strip the interface since ListenerFactoryContext only need to support +// ListenerFilterChain creation. e.g, Is listenerMetaData() required? Is it required only at +// listener update or during the lifetime of listener? +class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryContext { +public: + PerListenerFactoryContextImpl(Envoy::Server::Instance& server, + ProtobufMessage::ValidationVisitor& validation_visitor, + const envoy::config::listener::v3::Listener& config_message, + const Network::ListenerConfig* listener_config, + ListenerImpl& listener_impl, DrainManagerPtr drain_manager) + : listener_factory_context_base_(std::make_shared( + server, validation_visitor, config_message, std::move(drain_manager))), + listener_config_(listener_config), listener_impl_(listener_impl) {} + PerListenerFactoryContextImpl( + std::shared_ptr listener_factory_context_base, + const Network::ListenerConfig* listener_config, ListenerImpl& listener_impl) + : listener_factory_context_base_(listener_factory_context_base), + listener_config_(listener_config), listener_impl_(listener_impl) {} + + // FactoryContext + AccessLog::AccessLogManager& accessLogManager() override; + Upstream::ClusterManager& clusterManager() override; + 
Event::Dispatcher& dispatcher() override; + Network::DrainDecision& drainDecision() override; + Grpc::Context& grpcContext() override; + bool healthCheckFailed() override; + Http::Context& httpContext() override; + Init::Manager& initManager() override; + const LocalInfo::LocalInfo& localInfo() const override; + Envoy::Runtime::RandomGenerator& random() override; + Envoy::Runtime::Loader& runtime() override; + Stats::Scope& scope() override; + Singleton::Manager& singletonManager() override; + OverloadManager& overloadManager() override; + ThreadLocal::Instance& threadLocal() override; + Admin& admin() override; + const envoy::config::core::v3::Metadata& listenerMetadata() const override; + envoy::config::core::v3::TrafficDirection direction() const override; + TimeSource& timeSource() override; + ProtobufMessage::ValidationContext& messageValidationContext() override; + ProtobufMessage::ValidationVisitor& messageValidationVisitor() override; + Api::Api& api() override; + ServerLifecycleNotifier& lifecycleNotifier() override; + ProcessContextOptRef processContext() override; + Configuration::ServerFactoryContext& getServerFactoryContext() const override; + Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; + + Stats::Scope& listenerScope() override; + + // ListenerFactoryContext + const Network::ListenerConfig& listenerConfig() const override; + + ListenerFactoryContextBaseImpl& parentFactoryContext() { return *listener_factory_context_base_; } + friend class ListenerImpl; + +private: + std::shared_ptr listener_factory_context_base_; + const Network::ListenerConfig* listener_config_; + ListenerImpl& listener_impl_; +}; + /** * Maps proto config to runtime config for a listener with a network filter chain. 
*/ -class ListenerImpl : public Network::ListenerConfig, - public Configuration::ListenerFactoryContext, - public Network::DrainDecision, - public Network::FilterChainFactory, - Logger::Loggable { +class ListenerImpl final : public Network::ListenerConfig, + public Network::FilterChainFactory, + Logger::Loggable { public: /** * Create a new listener. @@ -120,7 +239,9 @@ class ListenerImpl : public Network::ListenerConfig, const Network::ListenSocketFactorySharedPtr& getSocketFactory() const { return socket_factory_; } void debugLog(const std::string& message); void initialize(); - DrainManager& localDrainManager() const { return *local_drain_manager_; } + DrainManager& localDrainManager() const { + return listener_factory_context_->listener_factory_context_base_->drainManager(); + } void setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory); void setSocketAndOptions(const Network::SocketSharedPtr& socket); const Network::Socket::OptionsSharedPtr& listenSocketOptions() { return listen_socket_options_; } @@ -143,42 +264,20 @@ class ListenerImpl : public Network::ListenerConfig, bool continueOnListenerFiltersTimeout() const override { return continue_on_listener_filters_timeout_; } - Stats::Scope& listenerScope() override { return *listener_scope_; } + Stats::Scope& listenerScope() override { return listener_factory_context_->listenerScope(); } uint64_t listenerTag() const override { return listener_tag_; } const std::string& name() const override { return name_; } Network::ActiveUdpListenerFactory* udpListenerFactory() override { return udp_listener_factory_.get(); } Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; } - - // Server::Configuration::ListenerFactoryContext - AccessLog::AccessLogManager& accessLogManager() override; - Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; - Network::DrainDecision& drainDecision() override; - Grpc::Context& 
grpcContext() override; - bool healthCheckFailed() override; - Http::Context& httpContext() override; - Init::Manager& initManager() override; - const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; - Envoy::Runtime::Loader& runtime() override; - Stats::Scope& scope() override; - Singleton::Manager& singletonManager() override; - OverloadManager& overloadManager() override; - ThreadLocal::Instance& threadLocal() override; - Admin& admin() override; - const envoy::config::core::v3::Metadata& listenerMetadata() const override; - envoy::config::core::v3::TrafficDirection direction() const override; - TimeSource& timeSource() override; - const Network::ListenerConfig& listenerConfig() const override; - ProtobufMessage::ValidationContext& messageValidationContext() override; - ProtobufMessage::ValidationVisitor& messageValidationVisitor() override; - Api::Api& api() override; - ServerLifecycleNotifier& lifecycleNotifier() override; - ProcessContextOptRef processContext() override; - Configuration::ServerFactoryContext& getServerFactoryContext() const override; - Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; + const std::vector& accessLogs() const override { + return access_logs_; + } + Init::Manager& initManager(); + envoy::config::core::v3::TrafficDirection direction() const override { + return config().traffic_direction(); + } void ensureSocketOptions() { if (!listen_socket_options_) { @@ -186,8 +285,6 @@ class ListenerImpl : public Network::ListenerConfig, std::make_shared>(); } } - // Network::DrainDecision - bool drainClose() const override; // Network::FilterChainFactory bool createNetworkFilterChain(Network::Connection& connection, @@ -210,11 +307,8 @@ class ListenerImpl : public Network::ListenerConfig, ListenerManagerImpl& parent_; Network::Address::InstanceConstSharedPtr address_; - FilterChainManagerImpl filter_chain_manager_; 
Network::ListenSocketFactorySharedPtr socket_factory_; - Stats::ScopePtr global_scope_; // Stats with global named scope, but needed for LDS cleanup. - Stats::ScopePtr listener_scope_; // Stats with listener named scope. const bool bind_to_port_; const bool hand_off_restored_destination_connections_; const uint32_t per_connection_buffer_limit_bytes_; @@ -225,17 +319,15 @@ class ListenerImpl : public Network::ListenerConfig, const uint64_t hash_; ProtobufMessage::ValidationVisitor& validation_visitor_; - // This init watcher, if workers_started_ is false, notifies the "parent" listener manager when - // listener initialization is complete. - Init::WatcherImpl local_init_watcher_; // A target is added to Server's InitManager if workers_started_ is false. Init::TargetImpl listener_init_target_; // This init manager is populated with targets from the filter chain factories, namely // RdsRouteConfigSubscription::init_target_, so the listener can wait for route configs. - Init::ManagerImpl dynamic_init_manager_; + std::unique_ptr dynamic_init_manager_; std::vector listener_filter_factories_; std::vector udp_listener_filter_factories_; + std::vector access_logs_; DrainManagerPtr local_drain_manager_; bool saw_listener_create_failure_{}; const envoy::config::listener::v3::Listener config_; @@ -245,6 +337,14 @@ class ListenerImpl : public Network::ListenerConfig, const bool continue_on_listener_filters_timeout_; Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; Network::ConnectionBalancerPtr connection_balancer_; + std::shared_ptr listener_factory_context_; + FilterChainManagerImpl filter_chain_manager_; + + // This init watcher, if workers_started_ is false, notifies the "parent" listener manager when + // listener initialization is complete. + // Important: local_init_watcher_ must be the last field in the class to avoid unexpected watcher + // callback during the destroy of ListenerImpl. 
+ Init::WatcherImpl local_init_watcher_; // to access ListenerManagerImpl::factory_. friend class ListenerFilterChainFactoryBuilder; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 049ef7dd3aa3..cf3288004b23 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -7,6 +7,8 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/network/filter.h" +#include "envoy/network/listener.h" #include "envoy/registry/registry.h" #include "envoy/server/active_udp_listener_config.h" #include "envoy/server/transport_socket_config.h" @@ -16,6 +18,7 @@ #include "common/common/fmt.h" #include "common/config/utility.h" #include "common/config/version_converter.h" +#include "common/network/filter_matcher.h" #include "common/network/io_socket_handle_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_factory.h" @@ -139,7 +142,8 @@ ProdListenerComponentFactory::createListenerFilterFactoryList_( proto_config); auto message = Config::Utility::translateToFactoryConfig( proto_config, context.messageValidationVisitor(), factory); - ret.push_back(factory.createFilterFactoryFromProto(*message, context)); + ret.push_back(factory.createListenerFilterFactoryFromProto( + *message, createListenerFilterMatcher(proto_config), context)); } return ret; } @@ -173,6 +177,16 @@ ProdListenerComponentFactory::createUdpListenerFilterFactoryList_( return ret; } +Network::ListenerFilterMatcherSharedPtr ProdListenerComponentFactory::createListenerFilterMatcher( + const envoy::config::listener::v3::ListenerFilter& listener_filter) { + if (!listener_filter.has_filter_disabled()) { + return nullptr; + } + return std::shared_ptr( + Network::ListenerFilterMatcherBuilder::buildListenerFilterMatcher( + listener_filter.filter_disabled())); +} + Network::SocketSharedPtr 
ProdListenerComponentFactory::createListenSocket( Network::Address::InstanceConstSharedPtr address, Network::Address::SocketType socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) { @@ -202,8 +216,8 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( } const std::string scheme = (socket_type == Network::Address::SocketType::Stream) - ? Network::Utility::TCP_SCHEME - : Network::Utility::UDP_SCHEME; + ? std::string(Network::Utility::TCP_SCHEME) + : std::string(Network::Utility::UDP_SCHEME); const std::string addr = absl::StrCat(scheme, address->asString()); if (params.bind_to_port && params.duplicate_parent_socket) { @@ -558,6 +572,17 @@ void ListenerManagerImpl::drainListener(ListenerImplPtr&& listener) { // main thread to avoid locking. This makes sure that we don't destroy the listener // while filters might still be using its context (stats, etc.). server_.dispatcher().post([this, draining_it]() -> void { + // TODO(lambdai): Resolve race condition below. + // Consider the below events in global sequence order + // master thread: calling drainListener + // work thread: deferred delete the active connection + // work thread: post to master that the drain is done + // master thread: erase the listener + // worker thread: execute destroying connection when the shared listener config is + // destroyed at step 4 (could be worse such as access the connection because connection is + // not yet started to deleted). The race condition is introduced because 3 occurs too + // early. My solution is to defer schedule the callback posting to master thread, by + // introducing DeferTaskUtil. So that 5 should always happen before 3. 
if (--draining_it->workers_pending_removal_ == 0) { draining_it->listener_->debugLog("draining listener removal complete"); draining_listeners_.erase(draining_it); @@ -628,7 +653,7 @@ void ListenerManagerImpl::onListenerWarmed(ListenerImpl& listener) { // The warmed listener should be added first so that the worker will accept new connections // when it stops listening on the old listener. for (const auto& worker : workers_) { - addListenerToWorker(*worker, listener, nullptr); + addListenerToWorker(*worker, listener, /* callback */ nullptr); } auto existing_active_listener = getListenerByName(active_listeners_, listener.name()); @@ -723,7 +748,7 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { } i++; } - if (active_listeners_.size() == 0) { + if (active_listeners_.empty()) { stats_.workers_started_.set(1); } } @@ -790,8 +815,8 @@ void ListenerManagerImpl::endListenerUpdate(FailureStates&& failure_states) { ListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder( ListenerImpl& listener, Server::Configuration::TransportSocketFactoryContextImpl& factory_context) - : ListenerFilterChainFactoryBuilder(listener.messageValidationVisitor(), - listener.parent_.factory_, factory_context) {} + : ListenerFilterChainFactoryBuilder(listener.validation_visitor_, listener.parent_.factory_, + factory_context) {} ListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder( ProtobufMessage::ValidationVisitor& validator, @@ -800,16 +825,18 @@ ListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder( : validator_(validator), listener_component_factory_(listener_component_factory), factory_context_(factory_context) {} -std::unique_ptr ListenerFilterChainFactoryBuilder::buildFilterChain( +std::shared_ptr ListenerFilterChainFactoryBuilder::buildFilterChain( const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const { return buildFilterChainInternal(filter_chain, 
context_creator.createFilterChainFactoryContext(&filter_chain)); } -std::unique_ptr ListenerFilterChainFactoryBuilder::buildFilterChainInternal( +std::shared_ptr +ListenerFilterChainFactoryBuilder::buildFilterChainInternal( const envoy::config::listener::v3::FilterChain& filter_chain, - Configuration::FilterChainFactoryContext& filter_chain_factory_context) const { + std::unique_ptr&& filter_chain_factory_context) + const { // If the cluster doesn't have transport socket configured, then use the default "raw_buffer" // transport socket or BoringSSL-based "tls" transport socket if TLS settings are configured. // We copy by value first then override if necessary. @@ -832,11 +859,14 @@ std::unique_ptr ListenerFilterChainFactoryBuilder::buildFi std::vector server_names(filter_chain.filter_chain_match().server_names().begin(), filter_chain.filter_chain_match().server_names().end()); - return std::make_unique( - config_factory.createTransportSocketFactory(*message, factory_context_, - std::move(server_names)), - listener_component_factory_.createNetworkFilterFactoryList(filter_chain.filters(), - filter_chain_factory_context)); + + auto filter_chain_res = + std::make_unique(config_factory.createTransportSocketFactory( + *message, factory_context_, std::move(server_names)), + listener_component_factory_.createNetworkFilterFactoryList( + filter_chain.filters(), *filter_chain_factory_context)); + filter_chain_res->setFilterChainFactoryContext(std::move(filter_chain_factory_context)); + return filter_chain_res; } Network::ListenSocketFactorySharedPtr ListenerManagerImpl::createListenSocketFactory( diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 6b24610fff7d..3a897e82946b 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -63,6 +63,9 @@ class ProdListenerComponentFactory : public ListenerComponentFactory, const Protobuf::RepeatedPtrField& filters, 
Configuration::ListenerFactoryContext& context); + static Network::ListenerFilterMatcherSharedPtr + createListenerFilterMatcher(const envoy::config::listener::v3::ListenerFilter& listener_filter); + // Server::ListenerComponentFactory LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { return std::make_unique( @@ -260,14 +263,15 @@ class ListenerFilterChainFactoryBuilder : public FilterChainFactoryBuilder { ListenerComponentFactory& listener_component_factory, Server::Configuration::TransportSocketFactoryContextImpl& factory_context); - std::unique_ptr + std::shared_ptr buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const override; private: - std::unique_ptr buildFilterChainInternal( - const envoy::config::listener::v3::FilterChain& filter_chain, - Configuration::FilterChainFactoryContext& filter_chain_factory_context) const; + std::shared_ptr + buildFilterChainInternal(const envoy::config::listener::v3::FilterChain& filter_chain, + std::unique_ptr&& + filter_chain_factory_context) const; ProtobufMessage::ValidationVisitor& validator_; ListenerComponentFactory& listener_component_factory_; diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 723ef5a4b71a..f054546f11c7 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -24,6 +24,8 @@ namespace Envoy { namespace { std::vector toArgsVector(int argc, const char* const* argv) { std::vector args; + args.reserve(argc); + for (int i = 0; i < argc; ++i) { args.emplace_back(argv[i]); } @@ -232,7 +234,7 @@ OptionsImpl::OptionsImpl(std::vector args, } if (!disable_extensions.getValue().empty()) { - disabled_extensions_ = absl::StrSplit(disable_extensions.getValue(), ","); + disabled_extensions_ = absl::StrSplit(disable_extensions.getValue(), ','); } } @@ -353,7 +355,7 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& 
void OptionsImpl::disableExtensions(const std::vector& names) { for (const auto& name : names) { - const std::vector parts = absl::StrSplit(name, absl::MaxSplits("/", 1)); + const std::vector parts = absl::StrSplit(name, absl::MaxSplits('/', 1)); if (parts.size() != 2) { ENVOY_LOG_MISC(warn, "failed to disable invalid extension name '{}'", name); diff --git a/source/server/server.cc b/source/server/server.cc index b524996ca916..56f90fc504ad 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -42,6 +42,7 @@ #include "server/configuration_impl.h" #include "server/connection_handler_impl.h" #include "server/guarddog_impl.h" +#include "server/http/utils.h" #include "server/listener_hooks.h" #include "server/ssl_context_manager.h" #include "server/wasm_config_impl.h" @@ -123,7 +124,9 @@ InstanceImpl::~InstanceImpl() { // RdsRouteConfigSubscription is an Init::Target, ~RdsRouteConfigSubscription triggers a callback // set at initialization, which goes to unregister it from the top-level InitManager, which has // already been destructed (use-after-free) causing a segfault. + ENVOY_LOG(debug, "destroying listener manager"); listener_manager_.reset(); + ENVOY_LOG(debug, "destroyed listener manager"); } Upstream::ClusterManager& InstanceImpl::clusterManager() { return *config_.clusterManager(); } @@ -519,7 +522,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch }); sig_usr_1_ = dispatcher.listenForSignal(SIGUSR1, [&access_log_manager]() { - ENVOY_LOG(warn, "caught SIGUSR1"); + ENVOY_LOG(info, "caught SIGUSR1. 
Reopening access logs."); access_log_manager.reopen(); }); @@ -698,4 +701,4 @@ ProtobufTypes::MessagePtr InstanceImpl::dumpBootstrapConfig() { } } // namespace Server -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/source/server/server.h b/source/server/server.h index 9e0ec3a365c3..b29ed3f2bcac 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -170,6 +170,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, TimeSource& timeSource() override { return api().timeSource(); } Api::Api& api() override { return server_.api(); } Grpc::Context& grpcContext() override { return server_.grpcContext(); } + Envoy::Server::DrainManager& drainManager() override { return server_.drainManager(); } // Configuration::TransportSocketFactoryContext Ssl::ContextManager& sslContextManager() override { return server_.sslContextManager(); } diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 74be77ad3532..43a9829a02ab 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -2,9 +2,10 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_package", "envoy_proto_library", ) @@ -77,8 +78,8 @@ envoy_cc_test( srcs = ["access_log_manager_impl_test.cc"], deps = [ "//source/common/access_log:access_log_manager_lib", - "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/access_log:access_log_mocks", "//test/mocks/api:api_mocks", "//test/mocks/event:event_mocks", @@ -86,7 +87,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "access_log_formatter_speed_test", srcs = ["access_log_formatter_speed_test.cc"], external_deps = [ @@ -102,3 +103,8 @@ envoy_cc_test_binary( "//test/test_common:printers_lib", ], ) 
+ +envoy_benchmark_test( + name = "access_log_formatter_speed_test_benchmark_test", + benchmark_binary = "access_log_formatter_speed_test", +) diff --git a/test/common/access_log/access_log_formatter_speed_test.cc b/test/common/access_log/access_log_formatter_speed_test.cc index fecc7f35cfc6..c946cfab8ed1 100644 --- a/test/common/access_log/access_log_formatter_speed_test.cc +++ b/test/common/access_log/access_log_formatter_speed_test.cc @@ -8,16 +8,44 @@ namespace { -static std::unique_ptr formatter; -static std::unique_ptr json_formatter; -static std::unique_ptr typed_json_formatter; -static std::unique_ptr stream_info; +std::unique_ptr MakeJsonFormatter(bool typed) { + std::unordered_map JsonLogFormat = { + {"remote_address", "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"}, + {"start_time", "%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%"}, + {"method", "%REQ(:METHOD)%"}, + {"url", "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%"}, + {"protocol", "%PROTOCOL%"}, + {"respoinse_code", "%RESPONSE_CODE%"}, + {"bytes_sent", "%BYTES_SENT%"}, + {"duration", "%DURATION%"}, + {"referer", "%REQ(REFERER)%"}, + {"user-agent", "%REQ(USER-AGENT)%"}}; + + return std::make_unique(JsonLogFormat, typed); +} + +std::unique_ptr makeStreamInfo() { + auto stream_info = std::make_unique(); + stream_info->setDownstreamRemoteAddress( + std::make_shared("203.0.113.1")); + return stream_info; +} } // namespace namespace Envoy { static void BM_AccessLogFormatter(benchmark::State& state) { + std::unique_ptr stream_info = makeStreamInfo(); + static const char* LogFormat = + "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% %START_TIME(%Y/%m/%dT%H:%M:%S%z %s)% " + "%REQ(:METHOD)% " + "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% " + "s%RESPONSE_CODE% %BYTES_SENT% %DURATION% %REQ(REFERER)% \"%REQ(USER-AGENT)%\" - - -\n"; + + std::unique_ptr formatter = + std::make_unique(LogFormat); + size_t output_bytes = 0; Http::TestRequestHeaderMapImpl 
request_headers; Http::TestResponseHeaderMapImpl response_headers; @@ -32,6 +60,9 @@ static void BM_AccessLogFormatter(benchmark::State& state) { BENCHMARK(BM_AccessLogFormatter); static void BM_JsonAccessLogFormatter(benchmark::State& state) { + std::unique_ptr stream_info = makeStreamInfo(); + std::unique_ptr json_formatter = MakeJsonFormatter(false); + size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; @@ -46,6 +77,10 @@ static void BM_JsonAccessLogFormatter(benchmark::State& state) { BENCHMARK(BM_JsonAccessLogFormatter); static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) { + std::unique_ptr stream_info = makeStreamInfo(); + std::unique_ptr typed_json_formatter = + MakeJsonFormatter(true); + size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; @@ -60,38 +95,3 @@ static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) { BENCHMARK(BM_TypedJsonAccessLogFormatter); } // namespace Envoy - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. 
-int main(int argc, char** argv) { - static const char* LogFormat = - "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% %START_TIME(%Y/%m/%dT%H:%M:%S%z %s)% " - "%REQ(:METHOD)% " - "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% " - "s%RESPONSE_CODE% %BYTES_SENT% %DURATION% %REQ(REFERER)% \"%REQ(USER-AGENT)%\" - - -\n"; - - formatter = std::make_unique(LogFormat); - - std::unordered_map JsonLogFormat = { - {"remote_address", "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"}, - {"start_time", "%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%"}, - {"method", "%REQ(:METHOD)%"}, - {"url", "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%"}, - {"protocol", "%PROTOCOL%"}, - {"respoinse_code", "%RESPONSE_CODE%"}, - {"bytes_sent", "%BYTES_SENT%"}, - {"duration", "%DURATION%"}, - {"referer", "%REQ(REFERER)%"}, - {"user-agent", "%REQ(USER-AGENT)%"}}; - - json_formatter = std::make_unique(JsonLogFormat, false); - typed_json_formatter = std::make_unique(JsonLogFormat, true); - - stream_info = std::make_unique(); - stream_info->setDownstreamRemoteAddress( - std::make_shared("203.0.113.1")); - benchmark::Initialize(&argc, argv); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index d2143438173c..d0903f07ccde 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -367,6 +367,39 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::stringValue("127.0.0.2"))); } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_PORT"); + + // Validate for IPv4 address + auto address = Network::Address::InstanceConstSharedPtr{ + new Network::Address::Ipv4Instance("127.1.2.3", 8443)}; + EXPECT_CALL(stream_info, 
downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); + EXPECT_EQ("8443", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info), + ProtoEq(ValueUtil::stringValue("8443"))); + + // Validate for IPv6 address + address = + Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; + EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); + EXPECT_EQ("9443", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info), + ProtoEq(ValueUtil::stringValue("9443"))); + + // Validate for Pipe + address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; + EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); + EXPECT_EQ("", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info), + ProtoEq(ValueUtil::stringValue(""))); + } + { StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.1", upstream_format.format(request_headers, response_headers, diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 5eb48d0fb9fe..2222f08e88b2 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -12,7 +12,6 @@ #include "common/config/utility.h" #include "common/protobuf/message_validator_impl.h" #include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" #include "test/common/stream_info/test_util.h" #include "test/common/upstream/utility.h" 
@@ -278,6 +277,7 @@ name: accesslog path: /dev/null )EOF"; + Runtime::RandomGeneratorImpl random; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); // Value is taken from random generator. @@ -320,6 +320,7 @@ name: accesslog path: /dev/null )EOF"; + Runtime::RandomGeneratorImpl random; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); // Value is taken from random generator. @@ -438,13 +439,6 @@ name: accesslog TEST_F(AccessLogImplTest, RequestTracing) { Runtime::RandomGeneratorImpl random; - std::string not_traceable_guid = random.uuid(); - - std::string force_tracing_guid = random.uuid(); - UuidUtils::setTraceableUuid(force_tracing_guid, UuidTraceStatus::Forced); - - std::string sample_tracing_guid = random.uuid(); - UuidUtils::setTraceableUuid(sample_tracing_guid, UuidTraceStatus::Sampled); const std::string yaml = R"EOF( name: accesslog @@ -458,19 +452,22 @@ name: accesslog InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); { - Http::TestRequestHeaderMapImpl forced_header{{"x-request-id", force_tracing_guid}}; + Http::TestRequestHeaderMapImpl forced_header{{"x-request-id", random.uuid()}}; + stream_info_.getRequestIDExtension()->setTraceStatus(forced_header, Http::TraceStatus::Forced); EXPECT_CALL(*file_, write(_)); log->log(&forced_header, &response_headers_, &response_trailers_, stream_info_); } { - Http::TestRequestHeaderMapImpl not_traceable{{"x-request-id", not_traceable_guid}}; + Http::TestRequestHeaderMapImpl not_traceable{{"x-request-id", random.uuid()}}; EXPECT_CALL(*file_, write(_)).Times(0); log->log(¬_traceable, &response_headers_, &response_trailers_, stream_info_); } { - Http::TestRequestHeaderMapImpl sampled_header{{"x-request-id", sample_tracing_guid}}; + Http::TestRequestHeaderMapImpl sampled_header{{"x-request-id", random.uuid()}}; + stream_info_.getRequestIDExtension()->setTraceStatus(sampled_header, + 
Http::TraceStatus::Sampled); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&sampled_header, &response_headers_, &response_trailers_, stream_info_); } diff --git a/test/common/access_log/access_log_manager_impl_test.cc b/test/common/access_log/access_log_manager_impl_test.cc index bb31ba98b558..799b8fc8bee3 100644 --- a/test/common/access_log/access_log_manager_impl_test.cc +++ b/test/common/access_log/access_log_manager_impl_test.cc @@ -2,8 +2,8 @@ #include "common/access_log/access_log_manager_impl.h" #include "common/filesystem/file_shared_impl.h" -#include "common/stats/isolated_store_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/access_log/mocks.h" #include "test/mocks/api/mocks.h" #include "test/mocks/event/mocks.h" @@ -51,7 +51,7 @@ class AccessLogManagerImplTest : public testing::Test { NiceMock file_system_; NiceMock* file_; const std::chrono::milliseconds timeout_40ms_{40}; - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; Thread::ThreadFactory& thread_factory_; NiceMock dispatcher_; Thread::MutexBasicLockable lock_; diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index 33c29e528b87..d91cc0b57354 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -2,9 +2,10 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", "envoy_proto_library", @@ -90,7 +91,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "buffer_speed_test", srcs = ["buffer_speed_test.cc"], external_deps = [ @@ -100,3 +101,8 @@ envoy_cc_test_binary( "//source/common/buffer:buffer_lib", ], ) + +envoy_benchmark_test( + name = "buffer_speed_test_benchmark_test", + benchmark_binary = "buffer_speed_test", +) diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 
9a60b0189e26..86e168532c9b 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -114,14 +114,10 @@ class StringBuffer : public Buffer::Instance { size_ -= size; } - uint64_t getRawSlices(Buffer::RawSlice* out, uint64_t out_size) const override { - if (out_size == 0) { - return 1; - } - // Sketchy, but probably will work for test purposes. - out->mem_ = const_cast(start()); - out->len_ = size_; - return 1; + Buffer::RawSliceVector + getRawSlices(absl::optional max_slices = absl::nullopt) const override { + ASSERT(!max_slices.has_value() || max_slices.value() >= 1); + return {{const_cast(start()), size_}}; } uint64_t length() const override { return size_; } @@ -382,14 +378,14 @@ uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, Buff break; } case test::common::buffer::Action::kGetRawSlices: { - const uint64_t slices_needed = target_buffer.getRawSlices(nullptr, 0); + const uint64_t slices_needed = target_buffer.getRawSlices().size(); const uint64_t slices_tested = std::min(slices_needed, static_cast(action.get_raw_slices())); if (slices_tested == 0) { break; } - std::vector raw_slices{slices_tested}; - const uint64_t slices_obtained = target_buffer.getRawSlices(raw_slices.data(), slices_tested); + Buffer::RawSliceVector raw_slices = target_buffer.getRawSlices(/*max_slices=*/slices_tested); + const uint64_t slices_obtained = raw_slices.size(); FUZZ_ASSERT(slices_obtained <= slices_needed); uint64_t offset = 0; const std::string data = target_buffer.toString(); diff --git a/test/common/buffer/buffer_speed_test.cc b/test/common/buffer/buffer_speed_test.cc index 0d9937e4cd62..d9456072b137 100644 --- a/test/common/buffer/buffer_speed_test.cc +++ b/test/common/buffer/buffer_speed_test.cc @@ -8,12 +8,12 @@ namespace Envoy { static constexpr uint64_t MaxBufferLength = 1024 * 1024; -// No-op release callback for use in BufferFragmentImpl instances. 
-static const std::function - DoNotReleaseFragment = nullptr; +// The fragment needs to be heap allocated in order to survive past the processing done in the inner +// loop in the benchmarks below. Do not attempt to release the actual contents of the buffer. +void deleteFragment(const void*, size_t, const Buffer::BufferFragmentImpl* self) { delete self; } // Test the creation of an empty OwnedImpl. -static void BufferCreateEmpty(benchmark::State& state) { +static void bufferCreateEmpty(benchmark::State& state) { uint64_t length = 0; for (auto _ : state) { Buffer::OwnedImpl buffer; @@ -21,10 +21,10 @@ static void BufferCreateEmpty(benchmark::State& state) { } benchmark::DoNotOptimize(length); } -BENCHMARK(BufferCreateEmpty); +BENCHMARK(bufferCreateEmpty); // Test the creation of an OwnedImpl with varying amounts of content. -static void BufferCreate(benchmark::State& state) { +static void bufferCreate(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); uint64_t length = 0; @@ -34,10 +34,10 @@ static void BufferCreate(benchmark::State& state) { } benchmark::DoNotOptimize(length); } -BENCHMARK(BufferCreate)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferCreate)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Grow an OwnedImpl in very small amounts. -static void BufferAddSmallIncrement(benchmark::State& state) { +static void bufferAddSmallIncrement(benchmark::State& state) { const std::string data("a"); const absl::string_view input(data); Buffer::OwnedImpl buffer; @@ -54,10 +54,10 @@ static void BufferAddSmallIncrement(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferAddSmallIncrement)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5); +BENCHMARK(bufferAddSmallIncrement)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5); // Test the appending of varying amounts of content from a string to an OwnedImpl. 
-static void BufferAddString(benchmark::State& state) { +static void bufferAddString(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); Buffer::OwnedImpl buffer(input); @@ -69,11 +69,11 @@ static void BufferAddString(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferAddString)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferAddString)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); -// Variant of BufferAddString that appends from another Buffer::Instance +// Variant of bufferAddString that appends from another Buffer::Instance // rather than from a string. -static void BufferAddBuffer(benchmark::State& state) { +static void bufferAddBuffer(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); const Buffer::OwnedImpl to_add(data); @@ -86,10 +86,10 @@ static void BufferAddBuffer(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferAddBuffer)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferAddBuffer)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the prepending of varying amounts of content from a string to an OwnedImpl. -static void BufferPrependString(benchmark::State& state) { +static void bufferPrependString(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); Buffer::OwnedImpl buffer(input); @@ -101,10 +101,10 @@ static void BufferPrependString(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferPrependString)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferPrependString)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the prepending of one OwnedImpl to another. 
-static void BufferPrependBuffer(benchmark::State& state) { +static void bufferPrependBuffer(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); Buffer::OwnedImpl buffer(input); @@ -113,8 +113,9 @@ static void BufferPrependBuffer(benchmark::State& state) { // buffer every time without the overhead of a copy, we use an BufferFragment that references // (and never deletes) an external string. Buffer::OwnedImpl to_add; - Buffer::BufferFragmentImpl fragment(input.data(), input.size(), DoNotReleaseFragment); - to_add.addBufferFragment(fragment); + auto fragment = + std::make_unique(input.data(), input.size(), deleteFragment); + to_add.addBufferFragment(*fragment.release()); buffer.prepend(to_add); if (buffer.length() >= MaxBufferLength) { @@ -123,9 +124,9 @@ static void BufferPrependBuffer(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferPrependBuffer)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferPrependBuffer)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); -static void BufferDrain(benchmark::State& state) { +static void bufferDrain(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); const Buffer::OwnedImpl to_add(data); @@ -150,10 +151,10 @@ static void BufferDrain(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferDrain)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferDrain)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Drain an OwnedImpl in very small amounts. 
-static void BufferDrainSmallIncrement(benchmark::State& state) { +static void bufferDrainSmallIncrement(benchmark::State& state) { const std::string data(1024 * 1024, 'a'); const absl::string_view input(data); Buffer::OwnedImpl buffer(input); @@ -165,10 +166,10 @@ static void BufferDrainSmallIncrement(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferDrainSmallIncrement)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5); +BENCHMARK(bufferDrainSmallIncrement)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5); // Test the moving of content from one OwnedImpl to another. -static void BufferMove(benchmark::State& state) { +static void bufferMove(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); Buffer::OwnedImpl buffer1(input); @@ -180,12 +181,12 @@ static void BufferMove(benchmark::State& state) { uint64_t length = buffer1.length(); benchmark::DoNotOptimize(length); } -BENCHMARK(BufferMove)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferMove)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the moving of content from one OwnedImpl to another, one byte at a time, to // exercise the (likely inefficient) code path in the implementation that handles // partial moves. -static void BufferMovePartial(benchmark::State& state) { +static void bufferMovePartial(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); Buffer::OwnedImpl buffer1(input); @@ -199,11 +200,11 @@ static void BufferMovePartial(benchmark::State& state) { uint64_t length = buffer1.length(); benchmark::DoNotOptimize(length); } -BENCHMARK(BufferMovePartial)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferMovePartial)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the reserve+commit cycle, for the special case where the reserved space is // fully used (and therefore the commit size equals the reservation size). 
-static void BufferReserveCommit(benchmark::State& state) { +static void bufferReserveCommit(benchmark::State& state) { Buffer::OwnedImpl buffer; for (auto _ : state) { constexpr uint64_t NumSlices = 2; @@ -220,11 +221,11 @@ static void BufferReserveCommit(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferReserveCommit)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferReserveCommit)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the reserve+commit cycle, for the common case where the reserved space is // only partially used (and therefore the commit size is smaller than the reservation size). -static void BufferReserveCommitPartial(benchmark::State& state) { +static void bufferReserveCommitPartial(benchmark::State& state) { Buffer::OwnedImpl buffer; for (auto _ : state) { constexpr uint64_t NumSlices = 2; @@ -241,43 +242,45 @@ static void BufferReserveCommitPartial(benchmark::State& state) { } benchmark::DoNotOptimize(buffer.length()); } -BENCHMARK(BufferReserveCommitPartial)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferReserveCommitPartial)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the linearization of a buffer in the best case where the data is in one slice. 
-static void BufferLinearizeSimple(benchmark::State& state) { +static void bufferLinearizeSimple(benchmark::State& state) { const std::string data(state.range(0), 'a'); const absl::string_view input(data); - Buffer::BufferFragmentImpl fragment(input.data(), input.size(), DoNotReleaseFragment); Buffer::OwnedImpl buffer; for (auto _ : state) { buffer.drain(buffer.length()); - buffer.addBufferFragment(fragment); + auto fragment = + std::make_unique(input.data(), input.size(), deleteFragment); + buffer.addBufferFragment(*fragment.release()); benchmark::DoNotOptimize(buffer.linearize(state.range(0))); } } -BENCHMARK(BufferLinearizeSimple)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferLinearizeSimple)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test the linearization of a buffer in the general case where the data is spread among // many slices. -static void BufferLinearizeGeneral(benchmark::State& state) { +static void bufferLinearizeGeneral(benchmark::State& state) { static constexpr uint64_t SliceSize = 1024; const std::string data(SliceSize, 'a'); const absl::string_view input(data); - Buffer::BufferFragmentImpl fragment(input.data(), input.size(), DoNotReleaseFragment); Buffer::OwnedImpl buffer; for (auto _ : state) { buffer.drain(buffer.length()); do { - buffer.addBufferFragment(fragment); + auto fragment = + std::make_unique(input.data(), input.size(), deleteFragment); + buffer.addBufferFragment(*fragment.release()); } while (buffer.length() < static_cast(state.range(0))); benchmark::DoNotOptimize(buffer.linearize(state.range(0))); } } -BENCHMARK(BufferLinearizeGeneral)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferLinearizeGeneral)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test buffer search, for the simple case where there are no partial matches for // the pattern in the buffer. 
-static void BufferSearch(benchmark::State& state) { +static void bufferSearch(benchmark::State& state) { const std::string Pattern(16, 'b'); std::string data; data.reserve(state.range(0) + Pattern.length()); @@ -292,11 +295,11 @@ static void BufferSearch(benchmark::State& state) { } benchmark::DoNotOptimize(result); } -BENCHMARK(BufferSearch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferSearch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test buffer search, for the more challenging case where there are many partial matches // for the pattern in the buffer. -static void BufferSearchPartialMatch(benchmark::State& state) { +static void bufferSearchPartialMatch(benchmark::State& state) { const std::string Pattern(16, 'b'); const std::string PartialMatch("babbabbbabbbbabbbbbabbbbbbabbbbbbbabbbbbbbba"); std::string data; @@ -315,11 +318,11 @@ static void BufferSearchPartialMatch(benchmark::State& state) { } benchmark::DoNotOptimize(result); } -BENCHMARK(BufferSearchPartialMatch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferSearchPartialMatch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test buffer startsWith, for the simple case where there is no match for the pattern at the start // of the buffer. -static void BufferStartsWith(benchmark::State& state) { +static void bufferStartsWith(benchmark::State& state) { const std::string Pattern(16, 'b'); std::string data; data.reserve(state.range(0) + Pattern.length()); @@ -336,10 +339,10 @@ static void BufferStartsWith(benchmark::State& state) { } benchmark::DoNotOptimize(result); } -BENCHMARK(BufferStartsWith)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +BENCHMARK(bufferStartsWith)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); // Test buffer startsWith, when there is a match at the start of the buffer. 
-static void BufferStartsWithMatch(benchmark::State& state) { +static void bufferStartsWithMatch(benchmark::State& state) { const std::string Prefix(state.range(1), 'b'); const std::string Suffix("babbabbbabbbbabbbbbabbbbbbabbbbbbbabbbbbbbba"); std::string data = Prefix; @@ -359,20 +362,10 @@ static void BufferStartsWithMatch(benchmark::State& state) { } benchmark::DoNotOptimize(result); } -BENCHMARK(BufferStartsWithMatch) +BENCHMARK(bufferStartsWithMatch) ->Args({1, 1}) ->Args({4096, 16}) ->Args({16384, 256}) ->Args({65536, 4096}); } // namespace Envoy - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. -int main(int argc, char** argv) { - benchmark::Initialize(&argc, argv); - - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 3208dc583292..6934fc63c2f4 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -13,7 +13,6 @@ #include "gtest/gtest.h" using testing::_; -using testing::ContainerEq; using testing::Return; namespace Envoy { @@ -585,23 +584,36 @@ TEST_F(OwnedImplTest, AppendSliceForTest) { static constexpr size_t NumInputs = 3; static constexpr const char* Inputs[] = {"one", "2", "", "four", ""}; Buffer::OwnedImpl buffer; - RawSlice slices[NumInputs]; - EXPECT_EQ(0, buffer.getRawSlices(slices, NumInputs)); + EXPECT_EQ(0, buffer.getRawSlices().size()); + EXPECT_EQ(0, buffer.getRawSlices(NumInputs).size()); for (const auto& input : Inputs) { buffer.appendSliceForTest(input); } - // getRawSlices will only return the 3 slices with nonzero length. - EXPECT_EQ(3, buffer.getRawSlices(slices, NumInputs)); + // getRawSlices(max_slices) will only return the 3 slices with nonzero length. 
+ RawSliceVector slices = buffer.getRawSlices(/*max_slices=*/NumInputs); + EXPECT_EQ(3, slices.size()); + + // Verify edge case where max_slices is -1 and +1 the actual non-empty slice count. + EXPECT_EQ(2, buffer.getRawSlices(/*max_slices=*/NumInputs - 1).size()); + EXPECT_EQ(3, buffer.getRawSlices(/*max_slices=*/NumInputs + 1).size()); auto expectSlice = [](const RawSlice& slice, const char* expected) { size_t length = strlen(expected); - EXPECT_EQ(length, slice.len_); + EXPECT_EQ(length, slice.len_) << expected; EXPECT_EQ(0, memcmp(slice.mem_, expected, length)); }; expectSlice(slices[0], "one"); expectSlice(slices[1], "2"); expectSlice(slices[2], "four"); + + // getRawSlices returns only the slices with nonzero length. + RawSliceVector slices_vector = buffer.getRawSlices(); + EXPECT_EQ(3, slices_vector.size()); + + expectSlice(slices_vector[0], "one"); + expectSlice(slices_vector[1], "2"); + expectSlice(slices_vector[2], "four"); } // Regression test for oss-fuzz issue @@ -694,14 +706,14 @@ void TestBufferMove(uint64_t buffer1_length, uint64_t buffer2_length, uint64_t expected_slice_count) { Buffer::OwnedImpl buffer1; buffer1.add(std::string(buffer1_length, 'a')); - EXPECT_EQ(1, buffer1.getRawSlices(nullptr, 0)); + EXPECT_EQ(1, buffer1.getRawSlices().size()); Buffer::OwnedImpl buffer2; buffer2.add(std::string(buffer2_length, 'b')); - EXPECT_EQ(1, buffer2.getRawSlices(nullptr, 0)); + EXPECT_EQ(1, buffer2.getRawSlices().size()); buffer1.move(buffer2); - EXPECT_EQ(expected_slice_count, buffer1.getRawSlices(nullptr, 0)); + EXPECT_EQ(expected_slice_count, buffer1.getRawSlices().size()); EXPECT_EQ(buffer1_length + buffer2_length, buffer1.length()); // Make sure `buffer2` was drained. 
EXPECT_EQ(0, buffer2.length()); diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index be3cfedaf671..1010e649843b 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -267,8 +267,8 @@ TEST_F(WatermarkBufferTest, MoveWatermarks) { TEST_F(WatermarkBufferTest, GetRawSlices) { buffer_.add(TEN_BYTES, 10); - RawSlice slices[2]; - ASSERT_EQ(1, buffer_.getRawSlices(&slices[0], 2)); + RawSliceVector slices = buffer_.getRawSlices(/*max_slices=*/2); + ASSERT_EQ(1, slices.size()); EXPECT_EQ(10, slices[0].len_); EXPECT_EQ(0, memcmp(slices[0].mem_, &TEN_BYTES[0], 10)); diff --git a/test/common/compressor/zlib_compressor_impl_test.cc b/test/common/compressor/zlib_compressor_impl_test.cc index dcaf12ba54cd..8c5b181b7d0e 100644 --- a/test/common/compressor/zlib_compressor_impl_test.cc +++ b/test/common/compressor/zlib_compressor_impl_test.cc @@ -14,9 +14,8 @@ namespace { class ZlibCompressorImplTest : public testing::Test { protected: void expectValidFlushedBuffer(const Buffer::OwnedImpl& output_buffer) { - uint64_t num_comp_slices = output_buffer.getRawSlices(nullptr, 0); - absl::FixedArray compressed_slices(num_comp_slices); - output_buffer.getRawSlices(compressed_slices.begin(), num_comp_slices); + Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); const std::string header_hex_str = Hex::encode( reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); @@ -35,9 +34,8 @@ class ZlibCompressorImplTest : public testing::Test { void expectValidFinishedBuffer(const Buffer::OwnedImpl& output_buffer, const uint32_t input_size) { - uint64_t num_comp_slices = output_buffer.getRawSlices(nullptr, 0); - absl::FixedArray compressed_slices(num_comp_slices); - output_buffer.getRawSlices(compressed_slices.begin(), num_comp_slices); + Buffer::RawSliceVector compressed_slices = 
output_buffer.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); const std::string header_hex_str = Hex::encode( reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 48b7c160237a..fba7f12b4eb4 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -110,6 +110,7 @@ envoy_cc_test( "//source/common/config:version_converter_lib", "//source/common/protobuf", "//source/common/stats:isolated_store_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks:common_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", @@ -134,7 +135,7 @@ envoy_cc_test( "//source/common/config:resources_lib", "//source/common/config:version_converter_lib", "//source/common/protobuf", - "//source/common/stats:isolated_store_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks:common_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", @@ -154,6 +155,7 @@ envoy_cc_test( srcs = ["grpc_stream_test.cc"], deps = [ "//source/common/config:grpc_stream_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", "//test/mocks/grpc:grpc_mocks", diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index e76c19a43da1..1897c1a867ce 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -33,6 +33,7 @@ class AsyncDataSourceTest : public testing::Test { Event::MockDispatcher dispatcher_; Event::MockTimer* retry_timer_; Event::TimerCb retry_timer_cb_; + NiceMock request_{&cm_.async_client_}; Config::DataSource::LocalAsyncDataProviderPtr local_data_provider_; Config::DataSource::RemoteAsyncDataProviderPtr remote_data_provider_; @@ -115,7 +116,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceReturnFailure) { initialize([&](Http::RequestMessagePtr&, 
Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onFailure(Envoy::Http::AsyncClient::FailureReason::Reset); + callbacks.onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset); return nullptr; }); @@ -155,8 +156,9 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessWith503) { initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onSuccess(Http::ResponseMessagePtr{new Http::ResponseMessageImpl( - Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + callbacks.onSuccess( + request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); return nullptr; }); @@ -196,8 +198,9 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessWithEmptyBody) { initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onSuccess(Http::ResponseMessagePtr{new Http::ResponseMessageImpl( - Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})}); + callbacks.onSuccess( + request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "200"}}})}); return nullptr; }); @@ -243,7 +246,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccessIncorrectSha256) { Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(body); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request_, std::move(response)); return nullptr; }); @@ -288,7 +291,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceSuccess) { Http::ResponseHeaderMapPtr{new 
Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(body); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request_, std::move(response)); return nullptr; }); @@ -325,8 +328,9 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceDoNotAllowEmpty) { initialize([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onSuccess(Http::ResponseMessagePtr{new Http::ResponseMessageImpl( - Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + callbacks.onSuccess( + request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); return nullptr; }); @@ -369,7 +373,7 @@ TEST_F(AsyncDataSourceTest, DatasourceReleasedBeforeFetchingData) { Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(body); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request_, std::move(response)); return nullptr; }); @@ -413,8 +417,10 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceWithRetry) { initialize( [&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onSuccess(Http::ResponseMessagePtr{new Http::ResponseMessageImpl( - Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + callbacks.onSuccess( + request_, + Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ + new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); return nullptr; }, num_retries); @@ -442,7 +448,7 @@ TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceWithRetry) { new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(body); - 
callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request_, std::move(response)); return nullptr; })); } diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index c0af136225db..474172e3c9a4 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -12,7 +12,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::Eq; using testing::NiceMock; using testing::Throw; using testing::UnorderedElementsAre; diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index fbe30138d9aa..536836e53445 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -14,6 +14,7 @@ #include "common/protobuf/protobuf.h" #include "common/stats/isolated_store_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/common.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" @@ -99,7 +100,7 @@ class GrpcMuxImplTestBase : public testing::Test { std::unique_ptr grpc_mux_; NiceMock callbacks_; NiceMock local_info_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; Stats::Gauge& control_plane_connected_state_; }; diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 0b0eb8e50c18..03864161b5f0 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -3,6 +3,7 @@ #include "common/config/grpc_stream.h" #include "common/protobuf/protobuf.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/grpc/mocks.h" @@ -31,7 +32,7 @@ class GrpcStreamTest : public testing::Test { NiceMock dispatcher_; Grpc::MockAsyncStream async_stream_; - Stats::IsolatedStoreImpl 
stats_; + Stats::TestUtil::TestStore stats_; NiceMock random_; Envoy::Config::RateLimitSettings rate_limit_settings_; NiceMock callbacks_; diff --git a/test/common/config/http_subscription_impl_test.cc b/test/common/config/http_subscription_impl_test.cc index 9c3e8c4022a5..d79884ef1915 100644 --- a/test/common/config/http_subscription_impl_test.cc +++ b/test/common/config/http_subscription_impl_test.cc @@ -18,7 +18,7 @@ TEST_F(HttpSubscriptionImplTest, OnRequestReset) { EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) .Times(0); - http_callbacks_->onFailure(Http::AsyncClient::FailureReason::Reset); + http_callbacks_->onFailure(http_request_, Http::AsyncClient::FailureReason::Reset); EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0, 0)); timerTick(); EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); @@ -37,7 +37,7 @@ TEST_F(HttpSubscriptionImplTest, BadJsonRecovery) { EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); - http_callbacks_->onSuccess(std::move(message)); + http_callbacks_->onSuccess(http_request_, std::move(message)); EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0, 0)); request_in_progress_ = false; timerTick(); diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index c570086fa600..af798a4efac8 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -151,7 +151,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { } EXPECT_CALL(random_gen_, random()).WillOnce(Return(0)); EXPECT_CALL(*timer_, enableTimer(_, _)); - http_callbacks_->onSuccess(std::move(message)); + http_callbacks_->onSuccess(http_request_, std::move(message)); if (accept) { version_ = version; } diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index 
516ff13c36d1..7353ed85da6f 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -10,8 +10,8 @@ #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/protobuf/protobuf.h" -#include "common/stats/isolated_store_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/common.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" @@ -60,7 +60,7 @@ class NewGrpcMuxImplTestBase : public testing::Test { std::unique_ptr grpc_mux_; NiceMock callbacks_; NiceMock local_info_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; Stats::Gauge& control_plane_connected_state_; }; diff --git a/test/common/config/registry_test.cc b/test/common/config/registry_test.cc index 2797951b0d3a..6c7043ec59ab 100644 --- a/test/common/config/registry_test.cc +++ b/test/common/config/registry_test.cc @@ -17,7 +17,7 @@ namespace { class InternalFactory : public Config::UntypedFactory { public: - virtual ~InternalFactory() = default; + ~InternalFactory() override = default; std::string category() const override { return ""; } }; @@ -50,7 +50,7 @@ TEST(RegistryTest, InternalFactoryNotPublished) { class PublishedFactory : public Config::UntypedFactory { public: - virtual ~PublishedFactory() = default; + ~PublishedFactory() override = default; std::string category() const override { return "testing.published"; } }; diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index 42b88778e2a3..e3d13e37cacc 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -105,7 +105,7 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { virtual void doSubscriptionTearDown() {} - Stats::IsolatedStoreImpl stats_store_; + Stats::TestUtil::TestStore stats_store_; 
SubscriptionStats stats_; }; diff --git a/test/common/decompressor/zlib_decompressor_impl_test.cc b/test/common/decompressor/zlib_decompressor_impl_test.cc index e4d8610132e6..d2468aa0d966 100644 --- a/test/common/decompressor/zlib_decompressor_impl_test.cc +++ b/test/common/decompressor/zlib_decompressor_impl_test.cc @@ -259,7 +259,7 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { original_text.append(sample); } - const uint64_t num_slices = buffer.getRawSlices(nullptr, 0); + const uint64_t num_slices = buffer.getRawSlices().size(); EXPECT_EQ(num_slices, 20); Envoy::Compressor::ZlibCompressorImpl compressor; diff --git a/test/common/filesystem/watcher_impl_test.cc b/test/common/filesystem/watcher_impl_test.cc index 8251ad49f1ae..9ff21f2274a3 100644 --- a/test/common/filesystem/watcher_impl_test.cc +++ b/test/common/filesystem/watcher_impl_test.cc @@ -151,5 +151,35 @@ TEST_F(WatcherImplTest, RootDirectoryPath) { #endif } +TEST_F(WatcherImplTest, SymlinkAtomicRename) { + Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); + + TestEnvironment::createPath(TestEnvironment::temporaryPath("envoy_test")); + TestEnvironment::createPath(TestEnvironment::temporaryPath("envoy_test/..timestamp1")); + { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/..timestamp1/watched_file")); } + + TestEnvironment::createSymlink(TestEnvironment::temporaryPath("envoy_test/..timestamp1"), + TestEnvironment::temporaryPath("envoy_test/..data")); + TestEnvironment::createSymlink(TestEnvironment::temporaryPath("envoy_test/..data/watched_file"), + TestEnvironment::temporaryPath("envoy_test/watched_file")); + + WatchCallback callback; + EXPECT_CALL(callback, called(Watcher::Events::MovedTo)); + watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/"), Watcher::Events::MovedTo, + [&](uint32_t events) -> void { + callback.called(events); + dispatcher_->exit(); + }); + + 
TestEnvironment::createPath(TestEnvironment::temporaryPath("envoy_test/..timestamp2")); + { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/..timestamp2/watched_file")); } + TestEnvironment::createSymlink(TestEnvironment::temporaryPath("envoy_test/..timestamp2"), + TestEnvironment::temporaryPath("envoy_test/..tmp")); + TestEnvironment::renameFile(TestEnvironment::temporaryPath("envoy_test/..tmp"), + TestEnvironment::temporaryPath("envoy_test/..data")); + + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + } // namespace Filesystem } // namespace Envoy diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc index b22b7e741980..c1fa773b25d3 100644 --- a/test/common/grpc/context_impl_test.cc +++ b/test/common/grpc/context_impl_test.cc @@ -22,7 +22,7 @@ TEST(GrpcContextTest, ChargeStats) { Stats::StatNamePool pool(*symbol_table_); const Stats::StatName service = pool.add("service"); const Stats::StatName method = pool.add("method"); - Context::RequestNames request_names{service, method}; + Context::RequestStatNames request_names{service, method}; ContextImpl context(*symbol_table_); context.chargeStat(cluster, request_names, true); EXPECT_EQ(1U, cluster.stats_store_.counter("grpc.service.method.success").value()); @@ -39,6 +39,11 @@ TEST(GrpcContextTest, ChargeStats) { EXPECT_EQ(3U, cluster.stats_store_.counter("grpc.service.method.request_message_count").value()); EXPECT_EQ(4U, cluster.stats_store_.counter("grpc.service.method.response_message_count").value()); + context.chargeRequestMessageStat(cluster, {}, 3); + context.chargeResponseMessageStat(cluster, {}, 4); + EXPECT_EQ(3U, cluster.stats_store_.counter("grpc.request_message_count").value()); + EXPECT_EQ(4U, cluster.stats_store_.counter("grpc.response_message_count").value()); + Http::TestResponseTrailerMapImpl trailers; trailers.setGrpcStatus("0"); const Http::HeaderEntry* status = trailers.GrpcStatus(); @@ -65,20 +70,21 @@ TEST(GrpcContextTest, 
ResolveServiceAndMethod) { const Http::HeaderEntry* path = headers.Path(); Stats::TestSymbolTable symbol_table; ContextImpl context(*symbol_table); - absl::optional request_names = context.resolveServiceAndMethod(path); + absl::optional request_names = + context.resolveDynamicServiceAndMethod(path); EXPECT_TRUE(request_names); EXPECT_EQ("service_name", symbol_table->toString(request_names->service_)); EXPECT_EQ("method_name", symbol_table->toString(request_names->method_)); headers.setPath(""); - EXPECT_FALSE(context.resolveServiceAndMethod(path)); + EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); headers.setPath("/"); - EXPECT_FALSE(context.resolveServiceAndMethod(path)); + EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); headers.setPath("//"); - EXPECT_FALSE(context.resolveServiceAndMethod(path)); + EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); headers.setPath("/service_name"); - EXPECT_FALSE(context.resolveServiceAndMethod(path)); + EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); headers.setPath("/service_name/"); - EXPECT_FALSE(context.resolveServiceAndMethod(path)); + EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); } } // namespace Grpc diff --git a/test/common/grpc/google_grpc_creds_test.cc b/test/common/grpc/google_grpc_creds_test.cc index 8ca5ce1d762e..8c3b4efdefd4 100644 --- a/test/common/grpc/google_grpc_creds_test.cc +++ b/test/common/grpc/google_grpc_creds_test.cc @@ -40,10 +40,10 @@ TEST_F(CredsUtilityTest, GetChannelCredentials) { const std::string var_name = "GOOGLE_APPLICATION_CREDENTIALS"; EXPECT_EQ(nullptr, ::getenv(var_name.c_str())); const std::string creds_path = TestEnvironment::runfilesPath("test/common/grpc/service_key.json"); - TestEnvironment::setEnvVar(var_name.c_str(), creds_path.c_str(), 0); + TestEnvironment::setEnvVar(var_name, creds_path, 0); creds->mutable_google_default(); EXPECT_NE(nullptr, CredsUtility::getChannelCredentials(config, *api_)); - 
TestEnvironment::unsetEnvVar(var_name.c_str()); + TestEnvironment::unsetEnvVar(var_name); } TEST_F(CredsUtilityTest, DefaultSslChannelCredentials) { diff --git a/test/common/http/BUILD b/test/common/http/BUILD index c7dadc09bc49..dce641d6475c 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -161,10 +161,12 @@ envoy_cc_fuzz_test( corpus = "conn_manager_impl_corpus", deps = [ ":conn_manager_impl_fuzz_proto_cc_proto", + "//include/envoy/http:request_id_extension_interface", "//source/common/common:empty_string", "//source/common/http:conn_manager_lib", "//source/common/http:context_lib", "//source/common/http:date_provider_lib", + "//source/common/http:request_id_extension_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", "//source/common/stats:symbol_table_creator_lib", @@ -191,6 +193,7 @@ envoy_cc_test( "//include/envoy/access_log:access_log_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/event:dispatcher_interface", + "//include/envoy/http:request_id_extension_interface", "//include/envoy/tracing:http_tracer_interface", "//source/common/access_log:access_log_formatter_lib", "//source/common/access_log:access_log_lib", @@ -203,6 +206,7 @@ envoy_cc_test( "//source/common/http:exception_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http:request_id_extension_lib", "//source/common/network:address_lib", "//source/common/stats:stats_lib", "//source/common/upstream:upstream_includes", @@ -233,13 +237,14 @@ envoy_cc_test( name = "conn_manager_utility_test", srcs = ["conn_manager_utility_test.cc"], deps = [ + "//include/envoy/http:request_id_extension_interface", "//source/common/event:dispatcher_lib", "//source/common/http:conn_manager_lib", "//source/common/http:headers_lib", + "//source/common/http:request_id_extension_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", - 
"//source/common/runtime:uuid_util_lib", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", @@ -368,3 +373,14 @@ envoy_cc_test( "//source/common/http:path_utility_lib", ], ) + +envoy_cc_test( + name = "request_id_extension_uuid_impl_test", + srcs = ["request_id_extension_uuid_impl_test.cc"], + deps = [ + "//source/common/http:request_id_extension_lib", + "//source/common/runtime:runtime_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index d7fde7e1f114..fc46f8a5ef43 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -52,9 +52,13 @@ class AsyncClientImplTest : public testing::Test { .WillByDefault(ReturnRef(envoy::config::core::v3::Locality().default_instance())); } - void expectSuccess(uint64_t code) { - EXPECT_CALL(callbacks_, onSuccess_(_)) - .WillOnce(Invoke([code](ResponseMessage* response) -> void { + void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) { + EXPECT_CALL(callbacks_, onSuccess_(_, _)) + .WillOnce(Invoke([sent_request, code](const AsyncClient::Request& request, + ResponseMessage* response) -> void { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). 
+ EXPECT_EQ(sent_request, &request); EXPECT_EQ(code, Utility::getResponseStatus(response->headers())); })); } @@ -152,9 +156,11 @@ TEST_F(AsyncClientImplTest, Basic) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(©), false)); EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true)); - expectSuccess(200); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 200); ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers), false); @@ -188,12 +194,15 @@ TEST_F(AsyncClientImplTracingTest, Basic) { EXPECT_CALL(parent_span_, spawnChild_(_, "async fake_cluster egress", _)) .WillOnce(Return(child_span)); - expectSuccess(200); AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_); EXPECT_CALL(*child_span, setSampled(true)); EXPECT_CALL(*child_span, injectContext(_)); - client_.send(std::move(message_), callbacks_, options); + + auto* request = client_.send(std::move(message_), callbacks_, options); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 200); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); @@ -228,7 +237,6 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { copy.addCopy(":scheme", "http"); EXPECT_CALL(parent_span_, spawnChild_(_, child_span_name_, _)).WillOnce(Return(child_span)); - expectSuccess(200); AsyncClient::RequestOptions options = AsyncClient::RequestOptions() .setParentSpan(parent_span_) @@ -236,7 +244,11 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { .setSampled(false); EXPECT_CALL(*child_span, setSampled(false)); EXPECT_CALL(*child_span, injectContext(_)); - client_.send(std::move(message_), callbacks_, options); + + auto* request = 
client_.send(std::move(message_), callbacks_, options); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 200); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); @@ -279,13 +291,16 @@ TEST_F(AsyncClientImplTest, BasicHashPolicy) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(©), false)); EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true)); - expectSuccess(200); AsyncClient::RequestOptions options; Protobuf::RepeatedPtrField hash_policy; hash_policy.Add()->mutable_header()->set_header_name(":path"); options.setHashPolicy(hash_policy); - client_.send(std::move(message_), callbacks_, options); + + auto* request = client_.send(std::move(message_), callbacks_, options); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 200); ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers), false); @@ -312,7 +327,9 @@ TEST_F(AsyncClientImplTest, Retry) { EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true)); message_->headers().setReferenceEnvoyRetryOn(Headers::get().EnvoyRetryOnValues._5xx); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); // Expect retry and retry timer create. timer_ = new NiceMock(&dispatcher_); @@ -333,7 +350,7 @@ TEST_F(AsyncClientImplTest, Retry) { timer_->invokeCallback(); // Normal response. 
- expectSuccess(200); + expectSuccess(request, 200); ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers2), true); } @@ -462,7 +479,8 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false)); EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true)); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + auto* request1 = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request1, nullptr); // Send request 2. RequestMessagePtr message2{new RequestMessageImpl()}; @@ -478,18 +496,57 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { return nullptr; })); EXPECT_CALL(stream_encoder2, encodeHeaders(HeaderMapEqualRef(&message2->headers()), true)); - client_.send(std::move(message2), callbacks2, AsyncClient::RequestOptions()); + + auto* request2 = client_.send(std::move(message2), callbacks2, AsyncClient::RequestOptions()); + EXPECT_NE(request2, nullptr); + + // Send request 3. + RequestMessagePtr message3{new RequestMessageImpl()}; + HttpTestUtility::addDefaultHeaders(message3->headers()); + NiceMock stream_encoder3; + ResponseDecoder* response_decoder3{}; + MockAsyncClientCallbacks callbacks3; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseDecoder& decoder, + ConnectionPool::Callbacks& callbacks) -> ConnectionPool::Cancellable* { + callbacks.onPoolReady(stream_encoder3, cm_.conn_pool_.host_, stream_info_); + response_decoder3 = &decoder; + return nullptr; + })); + EXPECT_CALL(stream_encoder3, encodeHeaders(HeaderMapEqualRef(&message3->headers()), true)); + + auto* request3 = client_.send(std::move(message3), callbacks3, AsyncClient::RequestOptions()); + EXPECT_NE(request3, nullptr); // Finish request 2. 
ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{":status", "503"}}); - EXPECT_CALL(callbacks2, onSuccess_(_)); + EXPECT_CALL(callbacks2, onSuccess_(_, _)) + .WillOnce(Invoke( + [request2](const AsyncClient::Request& request, ResponseMessage* response) -> void { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). + EXPECT_EQ(request2, &request); + EXPECT_EQ(503, Utility::getResponseStatus(response->headers())); + })); response_decoder2->decodeHeaders(std::move(response_headers2), true); // Finish request 1. ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers), false); - expectSuccess(200); + expectSuccess(request1, 200); response_decoder_->decodeData(data, true); + + // Finish request 3. + ResponseHeaderMapPtr response_headers3(new TestResponseHeaderMapImpl{{":status", "500"}}); + EXPECT_CALL(callbacks3, onSuccess_(_, _)) + .WillOnce(Invoke( + [request3](const AsyncClient::Request& request, ResponseMessage* response) -> void { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). 
+ EXPECT_EQ(request3, &request); + EXPECT_EQ(500, Utility::getResponseStatus(response->headers())); + })); + response_decoder3->decodeHeaders(std::move(response_headers3), true); } TEST_F(AsyncClientImplTest, StreamAndRequest) { @@ -508,7 +565,8 @@ TEST_F(AsyncClientImplTest, StreamAndRequest) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false)); EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true)); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); // Start stream Buffer::InstancePtr body{new Buffer::OwnedImpl("test body")}; @@ -544,7 +602,7 @@ TEST_F(AsyncClientImplTest, StreamAndRequest) { // Finish request. ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers), false); - expectSuccess(200); + expectSuccess(request, 200); response_decoder_->decodeData(data, true); } @@ -598,9 +656,11 @@ TEST_F(AsyncClientImplTest, Trailers) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), false)); EXPECT_CALL(stream_encoder_, encodeData(BufferEqual(&data), true)); - expectSuccess(200); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 200); ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers), false); response_decoder_->decodeData(data, false); @@ -617,9 +677,11 @@ TEST_F(AsyncClientImplTest, ImmediateReset) { })); EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); - expectSuccess(503); - client_.send(std::move(message_), 
callbacks_, AsyncClient::RequestOptions()); + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 503); stream_encoder_.getStream().resetStream(StreamResetReason::RemoteReset); EXPECT_EQ( @@ -818,11 +880,20 @@ TEST_F(AsyncClientImplTest, ResetAfterResponseStart) { response_decoder_ = &decoder; return nullptr; })); - EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); - EXPECT_CALL(callbacks_, onFailure(_)); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); + + EXPECT_CALL(callbacks_, onFailure(_, _)) + .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, + AsyncClient::FailureReason reason) { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). 
+ EXPECT_EQ(&request, sent_request); + EXPECT_EQ(reason, AsyncClient::FailureReason::Reset); + })); + ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); response_decoder_->decodeHeaders(std::move(response_headers), false); stream_encoder_.getStream().resetStream(StreamResetReason::RemoteReset); @@ -915,11 +986,20 @@ TEST_F(AsyncClientImplTest, DestroyWithActiveRequest) { callbacks.onPoolReady(stream_encoder_, cm_.conn_pool_.host_, stream_info_); return nullptr; })); - EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); + + auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_NE(request, nullptr); + EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); - EXPECT_CALL(callbacks_, onFailure(_)); - client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); + EXPECT_CALL(callbacks_, onFailure(_, _)) + .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, + AsyncClient::FailureReason reason) { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). 
+ EXPECT_EQ(&request, sent_request); + EXPECT_EQ(reason, AsyncClient::FailureReason::Reset); + })); } TEST_F(AsyncClientImplTracingTest, DestroyWithActiveRequest) { @@ -937,9 +1017,18 @@ TEST_F(AsyncClientImplTracingTest, DestroyWithActiveRequest) { AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_); EXPECT_CALL(*child_span, setSampled(true)); EXPECT_CALL(*child_span, injectContext(_)); - client_.send(std::move(message_), callbacks_, options); - EXPECT_CALL(callbacks_, onFailure(_)); + auto* request = client_.send(std::move(message_), callbacks_, options); + EXPECT_NE(request, nullptr); + + EXPECT_CALL(callbacks_, onFailure(_, _)) + .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, + AsyncClient::FailureReason reason) { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). + EXPECT_EQ(&request, sent_request); + EXPECT_EQ(reason, AsyncClient::FailureReason::Reset); + })); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -962,7 +1051,14 @@ TEST_F(AsyncClientImplTest, PoolFailure) { return nullptr; })); - expectSuccess(503); + EXPECT_CALL(callbacks_, onSuccess_(_, _)) + .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* response) -> void { + // The callback gets called before AsyncClient::send() completes, which means that we don't + // have a request handle to compare to. 
+ EXPECT_NE(nullptr, &request); + EXPECT_EQ(503, Utility::getResponseStatus(response->headers())); + })); + EXPECT_EQ(nullptr, client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions())); EXPECT_EQ( @@ -979,7 +1075,13 @@ TEST_F(AsyncClientImplTest, PoolFailureWithBody) { return nullptr; })); - expectSuccess(503); + EXPECT_CALL(callbacks_, onSuccess_(_, _)) + .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* response) -> void { + // The callback gets called before AsyncClient::send() completes, which means that we don't + // have a request handle to compare to. + EXPECT_NE(nullptr, &request); + EXPECT_EQ(503, Utility::getResponseStatus(response->headers())); + })); message_->body() = std::make_unique("hello"); EXPECT_EQ(nullptr, client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions())); @@ -1056,12 +1158,16 @@ TEST_F(AsyncClientImplTest, RequestTimeout) { })); EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); - expectSuccess(504); timer_ = new NiceMock(&dispatcher_); EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); - client_.send(std::move(message_), callbacks_, - AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(40))); + + auto* request = + client_.send(std::move(message_), callbacks_, + AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(40))); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 504); timer_->invokeCallback(); EXPECT_EQ(1UL, @@ -1083,7 +1189,6 @@ TEST_F(AsyncClientImplTracingTest, RequestTimeout) { })); EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); - expectSuccess(504); timer_ = new NiceMock(&dispatcher_); EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); @@ -1096,7 +1201,11 @@ TEST_F(AsyncClientImplTracingTest, RequestTimeout) { 
.setTimeout(std::chrono::milliseconds(40)); EXPECT_CALL(*child_span, setSampled(true)); EXPECT_CALL(*child_span, injectContext(_)); - client_.send(std::move(message_), callbacks_, options); + + auto* request = client_.send(std::move(message_), callbacks_, options); + EXPECT_NE(request, nullptr); + + expectSuccess(request, 504); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index f0e7b5fc7c1a..9d5cac781586 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -285,7 +285,7 @@ TEST_F(CodecClientTest, SSLConnectionInfo) { // Test the codec getting input from a real TCP connection. class CodecNetworkTest : public testing::TestWithParam { public: - CodecNetworkTest() : api_(Api::createApiForTest()) { + CodecNetworkTest() : api_(Api::createApiForTest()), stream_info_(api_->timeSource()) { dispatcher_ = api_->allocateDispatcher(); auto socket = std::make_shared( Network::Test::getAnyAddress(GetParam()), nullptr, true); @@ -304,7 +304,7 @@ class CodecNetworkTest : public testing::TestWithParam void { upstream_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); upstream_connection_->addConnectionCallbacks(upstream_callbacks_); expected_callbacks--; @@ -365,6 +365,7 @@ class CodecNetworkTest : public testing::TestWithParam client_callbacks_; NiceMock inner_encoder_; NiceMock outer_decoder_; + StreamInfo::StreamInfoImpl stream_info_; }; // Send a block of data from upstream, and ensure it is received by the codec. 
diff --git a/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5629973466710016 b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5629973466710016 new file mode 100644 index 000000000000..9b9554fa8976 --- /dev/null +++ b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5629973466710016 @@ -0,0 +1,21 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + } + end_stream: true + } +} +actions { + new_stream { + request_headers { + } + } +} diff --git a/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635865126895616 b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635865126895616 new file mode 100644 index 000000000000..10ae307bc7ef --- /dev/null +++ b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635865126895616 @@ -0,0 +1,184 @@ +actions { + quiesce_drain { + } +} +actions { + stream_action { + stream_id: 2097152 + response { + continue_headers { + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: "GET" + value: "/ddddddddddddd" + } + headers { + key: ":path" + value: "/ddddddddddddd" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "0" + value: "GET" + } + headers { + key: "GET" + } + headers { + key: "GET" + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + } + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + } + } +} +actions { + stream_action { + 
response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + } + } +} +actions { + stream_action { + response { + data: 64512 + } + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + end_stream: true + } + } +} +actions { + client_drain { + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + } + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + end_stream: true + } + } +} +actions { + stream_action { + stream_id: 1024 + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,,,up,,,,upg1ade" + } + } + } + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,,,[,(,5,,,,pg1ade" + } + } + } + } +} +actions { + stream_action { + request { + reset_stream: 2097152 + } + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "connection" + value: ",,,,,,X,,[,(,5.,,,,,up,,,,upgeta1ade" + } + } + } + } +} diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 5557420efeb8..831e038d9b81 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -80,6 +80,8 @@ fromHttp2Settings(const test::common::http::Http2Settings& settings) { return options; } +using StreamResetCallbackFn = std::function; + // Internal representation of stream state. Encapsulates the stream state, mocks // and encoders for both the request/response. 
class HttpStream : public LinkedObject { @@ -122,17 +124,25 @@ class HttpStream : public LinkedObject { } request_, response_; HttpStream(ClientConnection& client, const TestRequestHeaderMapImpl& request_headers, - bool end_stream) { + bool end_stream, StreamResetCallbackFn stream_reset_callback) + : stream_reset_callback_(stream_reset_callback) { request_.request_encoder_ = &client.newStream(response_.response_decoder_); ON_CALL(request_.stream_callbacks_, onResetStream(_, _)) .WillByDefault(InvokeWithoutArgs([this] { ENVOY_LOG_MISC(trace, "reset request for stream index {}", stream_index_); resetStream(); + stream_reset_callback_(); })); ON_CALL(response_.stream_callbacks_, onResetStream(_, _)) .WillByDefault(InvokeWithoutArgs([this] { ENVOY_LOG_MISC(trace, "reset response for stream index {}", stream_index_); + // Reset the client stream when we know the server stream has been reset. This ensures + // that the internal book keeping resetStream() below is consistent with the state of the + // client codec state, which is necessary to prevent multiple simultaneous streams for the + // HTTP/1 codec. + request_.request_encoder_->getStream().resetStream(StreamResetReason::LocalReset); resetStream(); + stream_reset_callback_(); })); ON_CALL(request_.request_decoder_, decodeHeaders_(_, true)) .WillByDefault(InvokeWithoutArgs([this] { @@ -320,6 +330,7 @@ class HttpStream : public LinkedObject { } int32_t stream_index_{-1}; + StreamResetCallbackFn stream_reset_callback_; }; // Buffer between client and server H1/H2 codecs. 
This models each write operation @@ -393,6 +404,8 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; uint32_t max_response_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; + const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW; ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; @@ -414,12 +427,12 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi fromHttp2Settings(input.h2_settings().server())}; server = std::make_unique( server_connection, server_callbacks, stats_store, server_http2_options, - max_request_headers_kb, max_request_headers_count); + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( server_connection, stats_store, server_callbacks, server_http1settings, - max_request_headers_kb, max_request_headers_count); + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } ReorderBuffer client_write_buf{*server}; @@ -465,9 +478,14 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } }; + // We track whether the connection should be closed for HTTP/1, since stream resets imply + // connection closes. 
+ bool should_close_connection = false; + constexpr auto max_actions = 1024; try { - for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) { + for (int i = 0; i < std::min(max_actions, input.actions().size()) && !should_close_connection; + ++i) { const auto& action = input.actions(i); ENVOY_LOG_MISC(trace, "action {} with {} streams", action.DebugString(), streams.size()); switch (action.action_selector_case()) { @@ -485,7 +503,12 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi HttpStreamPtr stream = std::make_unique( *client, fromSanitizedHeaders(action.new_stream().request_headers()), - action.new_stream().end_stream()); + action.new_stream().end_stream(), [&should_close_connection, http2]() { + // HTTP/1 codec has stream reset implying connection close. + if (!http2) { + should_close_connection = true; + } + }); stream->moveIntoListBack(std::move(stream), pending_streams); break; } @@ -528,11 +551,14 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi // Maybe nothing is set? break; } - if (DebugMode) { + if (DebugMode && !should_close_connection) { client_server_buf_drain(); } } - client_server_buf_drain(); + // Drain all remaining buffers, unless the connection is effectively closed. 
+ if (!should_close_connection) { + client_server_buf_drain(); + } if (http2) { dynamic_cast(*client).goAway(); dynamic_cast(*server).goAway(); diff --git a/test/common/http/codes_test.cc b/test/common/http/codes_test.cc index 2d615cc7974b..136519f60c5a 100644 --- a/test/common/http/codes_test.cc +++ b/test/common/http/codes_test.cc @@ -46,8 +46,8 @@ class CodeUtilityTest : public testing::Test { } Stats::TestSymbolTable symbol_table_; - Stats::IsolatedStoreImpl global_store_; - Stats::IsolatedStoreImpl cluster_scope_; + Stats::TestUtil::TestStore global_store_; + Stats::TestUtil::TestStore cluster_scope_; Http::CodeStatsImpl code_stats_; Stats::StatNamePool pool_; }; diff --git a/test/common/http/conn_manager_impl_corpus/regression_test_reuse_codec b/test/common/http/conn_manager_impl_corpus/regression_test_reuse_codec new file mode 100644 index 000000000000..5600a01f513c --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/regression_test_reuse_codec @@ -0,0 +1,16 @@ +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + request_headers { + headers { + key: "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" + } + } + } +} +actions { +} diff --git a/test/common/http/conn_manager_impl_corpus/state_local_complete b/test/common/http/conn_manager_impl_corpus/state_local_complete new file mode 100644 index 000000000000..2e03e3ad7a1c --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/state_local_complete @@ -0,0 +1,18 @@ +actions { + new_stream { + end_stream: true + } +} +actions { + stream_action { + response { + continue_headers { + headers { + key: "\177\177\177\177\177\177\177\177" + } + } + } + } +} +actions { +} diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index a4f7e7e867b5..38d471ded4cc 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -19,11 +19,12 @@ 
#include "common/http/context_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/exception.h" +#include "common/http/request_id_extension_impl.h" #include "common/network/address_impl.h" #include "common/network/utility.h" #include "common/stats/symbol_table_creator.h" -#include "test/common/http/conn_manager_impl_fuzz.pb.h" +#include "test/common/http/conn_manager_impl_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" #include "test/mocks/access_log/mocks.h" @@ -71,10 +72,13 @@ class FuzzConfig : public ConnectionManagerConfig { ON_CALL(scoped_route_config_provider_, lastUpdated()) .WillByDefault(Return(time_system_.systemTime())); access_logs_.emplace_back(std::make_shared>()); + request_id_extension_ = RequestIDExtensionFactory::defaultInstance(random_); } void newStream() { - codec_ = new NiceMock(); + if (!codec_) { + codec_ = new NiceMock(); + } decoder_filter_ = new NiceMock(); encoder_filter_ = new NiceMock(); EXPECT_CALL(filter_factory_, createFilterChain(_)) @@ -87,6 +91,8 @@ class FuzzConfig : public ConnectionManagerConfig { } // Http::ConnectionManagerConfig + + RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } const std::list& accessLogs() override { return access_logs_; } ServerConnectionPtr createCodec(Network::Connection&, const Buffer::Instance&, ServerConnectionCallbacks&) override { @@ -149,9 +155,15 @@ class FuzzConfig : public ConnectionManagerConfig { const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return false; } bool shouldMergeSlashes() const override { return false; } + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return envoy::config::core::v3::HttpProtocolOptions::ALLOW; + } const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; 
+ NiceMock random_; + RequestIDExtensionSharedPtr request_id_extension_; std::list access_logs_; MockServerConnection* codec_{}; MockStreamDecoderFilter* decoder_filter_{}; @@ -207,6 +219,8 @@ class FuzzStream { const HeaderMap& request_headers, bool end_stream) : conn_manager_(conn_manager), config_(config) { config_.newStream(); + request_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; + response_state_ = StreamState::PendingHeaders; EXPECT_CALL(*config_.codec_, dispatch(_)) .WillOnce(InvokeWithoutArgs([this, &request_headers, end_stream] { decoder_ = &conn_manager_.newStream(encoder_); @@ -221,13 +235,18 @@ class FuzzStream { headers->setHost( Fuzz::replaceInvalidHostCharacters(headers->Host()->value().getStringView())); } + // If sendLocalReply is called: + ON_CALL(encoder_, encodeHeaders(_, true)) + .WillByDefault(Invoke([this](const ResponseHeaderMap&, bool end_stream) -> void { + response_state_ = + end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; + })); decoder_->decodeHeaders(std::move(headers), end_stream); })); fakeOnData(); decoder_filter_ = config.decoder_filter_; encoder_filter_ = config.encoder_filter_; - request_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; - response_state_ = StreamState::PendingHeaders; + FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); } void fakeOnData() { @@ -307,6 +326,7 @@ class FuzzStream { decoder_->decodeData(buf, data_action.end_stream()); })); fakeOnData(); + FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); state = data_action.end_stream() ? 
StreamState::Closed : StreamState::PendingDataOrTrailers; } break; @@ -328,6 +348,7 @@ class FuzzStream { Fuzz::fromHeaders(trailers_action.headers()))); })); fakeOnData(); + FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); state = StreamState::Closed; } break; @@ -342,6 +363,7 @@ class FuzzStream { throw CodecProtocolException("blah"); })); fakeOnData(); + FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); state = StreamState::Closed; } break; @@ -432,6 +454,13 @@ class FuzzStream { using FuzzStreamPtr = std::unique_ptr; DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + FuzzConfig config; NiceMock drain_close; NiceMock random; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index c7890bcedbb5..a820323b4286 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -8,6 +8,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/request_id_extension.h" #include "envoy/tracing/http_tracer.h" #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "envoy/type/v3/percent.pb.h" @@ -23,6 +24,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" +#include "common/http/request_id_extension_impl.h" #include "common/network/address_impl.h" #include "common/network/utility.h" #include "common/upstream/upstream_impl.h" @@ -58,7 +60,6 @@ using testing::HasSubstr; using testing::InSequence; using testing::Invoke; using testing::InvokeWithoutArgs; -using testing::Matcher; using testing::NiceMock; using testing::Ref; using testing::Return; @@ 
-93,7 +94,8 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, - listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))} { + listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}, + request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)) { ON_CALL(route_config_provider_, lastUpdated()) .WillByDefault(Return(test_time_.timeSystem().systemTime())); @@ -344,6 +346,11 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headersWithUnderscoresAction() const override { + return headers_with_underscores_action_; + } Envoy::Event::SimulatedTimeSystem test_time_; NiceMock route_config_provider_; @@ -400,8 +407,11 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan Http::Http1Settings http1_settings_; bool normalize_path_ = false; bool merge_slashes_ = false; + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests + RequestIDExtensionSharedPtr request_id_extension_; // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. 
MockResponseEncoder response_encoder_; @@ -2891,36 +2901,10 @@ TEST_F(HttpConnectionManagerImplTest, FrameFloodError) { Buffer::OwnedImpl fake_input("1234"); EXPECT_LOG_NOT_CONTAINS("warning", "downstream HTTP flood", conn_manager_->onData(fake_input, false)); -} - -// Verify that FrameFloodException causes connection to be closed abortively as well as logged -// if runtime indicates to do so. -TEST_F(HttpConnectionManagerImplTest, FrameFloodErrorWithLog) { - InSequence s; - setup(false, ""); - - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - conn_manager_->newStream(response_encoder_); - throw FrameFloodException("too many outbound frames."); - })); - - EXPECT_CALL(runtime_.snapshot_, - featureEnabled("http.connection_manager.log_flood_exception", - Matcher(_))) - .WillOnce(Return(true)); - - EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); - EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); - - // FrameFloodException should result in reset of the streams followed by abortive close. - EXPECT_CALL(filter_callbacks_.connection_, - close(Network::ConnectionCloseType::FlushWriteAndDelay)); - - // Kick off the incoming data. - Buffer::OwnedImpl fake_input("1234"); - EXPECT_LOG_CONTAINS("warning", - "downstream HTTP flood from IP '0.0.0.0:0': too many outbound frames.", - conn_manager_->onData(fake_input, false)); + EXPECT_TRUE(filter_callbacks_.connection_.streamInfo().hasResponseFlag( + StreamInfo::ResponseFlag::DownstreamProtocolError)); + EXPECT_EQ("codec error: too many outbound frames.", + filter_callbacks_.connection_.streamInfo().responseCodeDetails().value()); } TEST_F(HttpConnectionManagerImplTest, IdleTimeoutNoCodec) { @@ -4159,6 +4143,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // Kick off the incoming data. 
Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); conn_manager_->onData(fake_input, false); } @@ -4634,6 +4619,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); conn_manager_->onData(fake_input, false); Buffer::OwnedImpl decoded_data_to_forward; @@ -4812,6 +4798,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, protocol(Envoy::Http::Protocol::Http11)); conn_manager_->onData(fake_input, false); // Mimic a decoder filter that trapped data and now sends it on, since the data was buffered diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index b2301a03648a..60ece5bfdf44 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -1,15 +1,16 @@ #include #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/request_id_extension.h" #include "envoy/type/v3/percent.pb.h" #include "common/http/conn_manager_utility.h" #include "common/http/header_utility.h" #include "common/http/headers.h" +#include "common/http/request_id_extension_impl.h" #include "common/network/address_impl.h" #include "common/network/utility.h" #include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" #include "test/mocks/http/mocks.h" #include "test/mocks/local_info/mocks.h" @@ -33,6 +34,42 @@ using testing::ReturnRef; namespace Envoy { namespace Http { +class MockRequestIDExtension : public RequestIDExtension { +public: + explicit MockRequestIDExtension(Runtime::RandomGenerator& 
random) + : real_(RequestIDExtensionFactory::defaultInstance(random)) { + ON_CALL(*this, set(_, _)) + .WillByDefault([this](Http::RequestHeaderMap& request_headers, bool force) { + return real_->set(request_headers, force); + }); + ON_CALL(*this, setInResponse(_, _)) + .WillByDefault([this](Http::ResponseHeaderMap& response_headers, + const Http::RequestHeaderMap& request_headers) { + return real_->setInResponse(response_headers, request_headers); + }); + ON_CALL(*this, modBy(_, _, _)) + .WillByDefault([this](const Http::RequestHeaderMap& request_headers, uint64_t& out, + uint64_t mod) { return real_->modBy(request_headers, out, mod); }); + ON_CALL(*this, getTraceStatus(_)) + .WillByDefault([this](const Http::RequestHeaderMap& request_headers) { + return real_->getTraceStatus(request_headers); + }); + ON_CALL(*this, setTraceStatus(_, _)) + .WillByDefault([this](Http::RequestHeaderMap& request_headers, TraceStatus trace_status) { + real_->setTraceStatus(request_headers, trace_status); + }); + } + + MOCK_METHOD(void, set, (Http::RequestHeaderMap&, bool)); + MOCK_METHOD(void, setInResponse, (Http::ResponseHeaderMap&, const Http::RequestHeaderMap&)); + MOCK_METHOD(bool, modBy, (const Http::RequestHeaderMap&, uint64_t&, uint64_t)); + MOCK_METHOD(TraceStatus, getTraceStatus, (const Http::RequestHeaderMap&)); + MOCK_METHOD(void, setTraceStatus, (Http::RequestHeaderMap&, TraceStatus)); + +private: + RequestIDExtensionSharedPtr real_; +}; + class MockInternalAddressConfig : public Http::InternalAddressConfig { public: MOCK_METHOD(bool, isInternalAddress, (const Network::Address::Instance&), (const)); @@ -52,6 +89,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { return ServerConnectionPtr{createCodec_(connection, instance, callbacks)}; } + MOCK_METHOD(RequestIDExtensionSharedPtr, requestIDExtension, ()); MOCK_METHOD(const std::list&, accessLogs, ()); MOCK_METHOD(ServerConnection*, createCodec_, (Network::Connection&, const Buffer::Instance&, 
ServerConnectionCallbacks&)); @@ -96,14 +134,22 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); MOCK_METHOD(bool, shouldNormalizePath, (), (const)); MOCK_METHOD(bool, shouldMergeSlashes, (), (const)); + MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction, + headersWithUnderscoresAction, (), (const)); std::unique_ptr internal_address_config_ = std::make_unique(); }; +const Http::LowerCaseString& traceStatusHeader() { + static Http::LowerCaseString header("x-trace-status"); + return header; +} + class ConnectionManagerUtilityTest : public testing::Test { public: - ConnectionManagerUtilityTest() { + ConnectionManagerUtilityTest() + : request_id_extension_(std::make_shared>(random_)) { ON_CALL(config_, userAgent()).WillByDefault(ReturnRef(user_agent_)); envoy::type::v3::FractionalPercent percent1; @@ -116,6 +162,7 @@ class ConnectionManagerUtilityTest : public testing::Test { ON_CALL(config_, tracingConfig()).WillByDefault(Return(&tracing_config_)); ON_CALL(config_, via()).WillByDefault(ReturnRef(via_)); + ON_CALL(config_, requestIDExtension()).WillByDefault(Return(request_id_extension_)); } struct MutateRequestRet { @@ -132,10 +179,9 @@ class ConnectionManagerUtilityTest : public testing::Test { // the request is internal/external, given the importance of these two pieces of data. 
MutateRequestRet callMutateRequestHeaders(RequestHeaderMap& headers, Protocol) { MutateRequestRet ret; - ret.downstream_address_ = - ConnectionManagerUtility::mutateRequestHeaders(headers, connection_, config_, route_config_, - random_, local_info_) - ->asString(); + ret.downstream_address_ = ConnectionManagerUtility::mutateRequestHeaders( + headers, connection_, config_, route_config_, local_info_) + ->asString(); ConnectionManagerUtility::mutateTracingRequestHeader(headers, runtime_, config_, &route_); ret.internal_ = HeaderUtility::isEnvoyInternalRequest(headers); return ret; @@ -143,6 +189,7 @@ class ConnectionManagerUtilityTest : public testing::Test { NiceMock connection_; NiceMock random_; + std::shared_ptr> request_id_extension_; NiceMock config_; NiceMock route_config_; NiceMock route_; @@ -337,7 +384,8 @@ TEST_F(ConnectionManagerUtilityTest, ViaEmpty) { EXPECT_FALSE(request_headers.has(Headers::get().Via)); TestResponseHeaderMapImpl response_headers; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, via_); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), via_); EXPECT_FALSE(response_headers.has(Headers::get().Via)); } @@ -354,9 +402,11 @@ TEST_F(ConnectionManagerUtilityTest, ViaAppend) { TestResponseHeaderMapImpl response_headers; // Pretend we're doing a 100-continue transform here. - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); // The actual response header processing. 
- ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, via_); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), via_); EXPECT_EQ("foo", response_headers.get_(Headers::get().Via)); } @@ -703,7 +753,8 @@ TEST_F(ConnectionManagerUtilityTest, MutateResponseHeaders) { {"connection", "foo"}, {"transfer-encoding", "foo"}, {"custom_header", "custom_value"}}; TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}}; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); EXPECT_EQ(1UL, response_headers.size()); EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -718,7 +769,8 @@ TEST_F(ConnectionManagerUtilityTest, DoNotRemoveConnectionUpgradeForWebSocketRes {"connection", "upgrade"}, {"transfer-encoding", "foo"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); EXPECT_EQ(2UL, response_headers.size()) << response_headers; EXPECT_EQ("upgrade", response_headers.get_("connection")); @@ -733,7 +785,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { {"connection", "foo"}, {"transfer-encoding", "bar"}, {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); EXPECT_EQ(1UL, 
response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -748,7 +801,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); EXPECT_EQ(2UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -761,7 +815,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { TestResponseHeaderMapImpl response_headers{{"transfer-encoding", "foo"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("bar", response_headers.get_("upgrade")); @@ -774,10 +829,27 @@ TEST_F(ConnectionManagerUtilityTest, MutateResponseHeadersReturnXRequestId) { TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}, {"x-envoy-force-trace", "true"}}; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + EXPECT_CALL(*request_id_extension_, + setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) + .Times(1); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); EXPECT_EQ("request-id", response_headers.get_("x-request-id")); } +// Test that we do not return x-request-id if we were not 
requested to force a trace. +TEST_F(ConnectionManagerUtilityTest, SkipMutateResponseHeadersReturnXRequestId) { + TestResponseHeaderMapImpl response_headers; + TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}}; + + EXPECT_CALL(*request_id_extension_, + setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) + .Times(0); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); + EXPECT_EQ("", response_headers.get_("x-request-id")); +} + // Test full sanitization of x-forwarded-client-cert. TEST_F(ConnectionManagerUtilityTest, MtlsSanitizeClientCert) { auto ssl = std::make_shared>(); @@ -1075,10 +1147,12 @@ TEST_F(ConnectionManagerUtilityTest, RandomSamplingWhenGlobalSet) { Http::TestRequestHeaderMapImpl request_headers{ {"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, + setTraceStatus(testing::Ref(request_headers), TraceStatus::Sampled)) + .Times(1); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::Sampled, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::Sampled, request_id_extension_->getTraceStatus(request_headers)); } TEST_F(ConnectionManagerUtilityTest, SamplingWithoutRouteOverride) { @@ -1093,10 +1167,12 @@ TEST_F(ConnectionManagerUtilityTest, SamplingWithoutRouteOverride) { Http::TestRequestHeaderMapImpl request_headers{ {"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, + setTraceStatus(testing::Ref(request_headers), TraceStatus::Sampled)) + .Times(1); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::Sampled, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::Sampled, request_id_extension_->getTraceStatus(request_headers)); } TEST_F(ConnectionManagerUtilityTest, SamplingWithRouteOverride) { @@ 
-1118,10 +1194,12 @@ TEST_F(ConnectionManagerUtilityTest, SamplingWithRouteOverride) { Http::TestRequestHeaderMapImpl request_headers{ {"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, + setTraceStatus(testing::Ref(request_headers), TraceStatus::NoTrace)) + .Times(1); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::NoTrace, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); } // Sampling must not be done on client traced. @@ -1138,10 +1216,10 @@ TEST_F(ConnectionManagerUtilityTest, SamplingMustNotBeDoneOnClientTraced) { // The x_request_id has TRACE_FORCED(a) set in the TRACE_BYTE_POSITION(14) character. Http::TestRequestHeaderMapImpl request_headers{ {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::Forced, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::Forced, request_id_extension_->getTraceStatus(request_headers)); } // Sampling, global off. 
@@ -1157,10 +1235,15 @@ TEST_F(ConnectionManagerUtilityTest, NoTraceWhenSamplingSetButGlobalNotSet) { Http::TestRequestHeaderMapImpl request_headers{ {"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, + setTraceStatus(testing::Ref(request_headers), TraceStatus::Sampled)) + .Times(1); + EXPECT_CALL(*request_id_extension_, + setTraceStatus(testing::Ref(request_headers), TraceStatus::NoTrace)) + .Times(1); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::NoTrace, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); } // Client, client enabled, global on. @@ -1176,10 +1259,12 @@ TEST_F(ConnectionManagerUtilityTest, ClientSamplingWhenGlobalSet) { Http::TestRequestHeaderMapImpl request_headers{ {"x-client-trace-id", "f4dca0a9-12c7-4307-8002-969403baf480"}, {"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, + setTraceStatus(testing::Ref(request_headers), TraceStatus::Client)) + .Times(1); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::Client, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::Client, request_id_extension_->getTraceStatus(request_headers)); } // Client, client disabled, global on. 
@@ -1199,10 +1284,10 @@ TEST_F(ConnectionManagerUtilityTest, NoTraceWhenClientSamplingNotSetAndGlobalSet Http::TestRequestHeaderMapImpl request_headers{ {"x-client-trace-id", "f4dca0a9-12c7-4307-8002-969403baf480"}, {"x-request-id", "125a4afb-6f55-44ba-ad80-413f09f48a28"}}; + EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::NoTrace, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); } // Forced, global on. @@ -1217,10 +1302,12 @@ TEST_F(ConnectionManagerUtilityTest, ForcedTracedWhenGlobalSet) { runtime_.snapshot_, featureEnabled("tracing.global_enabled", An(), _)) .WillOnce(Return(true)); + EXPECT_CALL(*request_id_extension_, setTraceStatus(testing::Ref(headers), TraceStatus::Forced)) + .Times(1); EXPECT_EQ((MutateRequestRet{"10.0.0.1:0", true}), callMutateRequestHeaders(headers, Protocol::Http2)); - EXPECT_EQ(UuidTraceStatus::Forced, UuidUtils::isTraceableUuid(headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::Forced, request_id_extension_->getTraceStatus(headers)); } // Forced, global off. @@ -1235,30 +1322,35 @@ TEST_F(ConnectionManagerUtilityTest, NoTraceWhenForcedTracedButGlobalNotSet) { runtime_.snapshot_, featureEnabled("tracing.global_enabled", An(), _)) .WillOnce(Return(false)); + EXPECT_CALL(*request_id_extension_, setTraceStatus(testing::Ref(headers), TraceStatus::Forced)) + .Times(1); + EXPECT_CALL(*request_id_extension_, setTraceStatus(testing::Ref(headers), TraceStatus::NoTrace)) + .Times(1); EXPECT_EQ((MutateRequestRet{"10.0.0.1:0", true}), callMutateRequestHeaders(headers, Protocol::Http2)); - EXPECT_EQ(UuidTraceStatus::NoTrace, UuidUtils::isTraceableUuid(headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(headers)); } // Forced, global on, broken uuid. 
TEST_F(ConnectionManagerUtilityTest, NoTraceOnBrokenUuid) { Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-force-trace", "true"}, {"x-request-id", "bb"}}; + EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0); callMutateRequestHeaders(request_headers, Protocol::Http2); - EXPECT_EQ(UuidTraceStatus::NoTrace, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); } TEST_F(ConnectionManagerUtilityTest, RemovesProxyResponseHeaders) { Http::TestRequestHeaderMapImpl request_headers{{}}; Http::TestResponseHeaderMapImpl response_headers{{"keep-alive", "timeout=60"}, {"proxy-connection", "proxy-header"}}; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, ""); + EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, + config_.requestIDExtension(), ""); - EXPECT_EQ(UuidTraceStatus::NoTrace, - UuidUtils::isTraceableUuid(request_headers.get_("x-request-id"))); + EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); EXPECT_FALSE(response_headers.has("keep-alive")); EXPECT_FALSE(response_headers.has("proxy-connection")); @@ -1340,6 +1432,8 @@ TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestId) { ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-request-id", "my-request-id"}, {"x-forwarded-for", "198.51.100.1"}}; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0); EXPECT_EQ((MutateRequestRet{"134.2.2.11:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); EXPECT_CALL(random_, uuid()).Times(0); @@ -1352,6 +1446,8 @@ TEST_F(ConnectionManagerUtilityTest, PreseverExternalRequestIdNoReqId) { ON_CALL(config_, 
useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-forwarded-for", "198.51.100.1"}}; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0); EXPECT_EQ((MutateRequestRet{"134.2.2.11:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId)); @@ -1361,7 +1457,10 @@ TEST_F(ConnectionManagerUtilityTest, PreseverExternalRequestIdNoReqId) { // requestID TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestKeepRequestId) { ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true)); - TestHeaderMapImpl headers{{"x-request-id", "myReqId"}}; + TestRequestHeaderMapImpl headers{{"x-request-id", "myReqId"}}; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0); + callMutateRequestHeaders(headers, Protocol::Http2); EXPECT_EQ("myReqId", headers.get_(Headers::get().RequestId)); } @@ -1370,6 +1469,8 @@ TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestKeepR TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestGenerateNewRequestId) { ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0); callMutateRequestHeaders(headers, Protocol::Http2); EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId)); } @@ -1378,12 +1479,14 @@ TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestGener TEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdEdgeRequestGenerateRequestId) { ON_CALL(config_, 
preserveExternalRequestId()).WillByDefault(Return(false)); connection_.remote_address_ = std::make_shared("134.2.2.11"); + // with request id { - ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-forwarded-for", "198.51.100.1"}, {"x-request-id", "my-request-id"}}; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), true)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, false)).Times(0); EXPECT_EQ((MutateRequestRet{"134.2.2.11:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId)); @@ -1392,6 +1495,8 @@ TEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdEdgeRequestGener // with no request id { TestRequestHeaderMapImpl headers{{"x-forwarded-for", "198.51.100.1"}}; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), true)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, false)).Times(0); EXPECT_EQ((MutateRequestRet{"134.2.2.11:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId)); @@ -1405,6 +1510,8 @@ TEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdNoEdgeRequest) { // with no request id { TestRequestHeaderMapImpl headers; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0); callMutateRequestHeaders(headers, Protocol::Http2); EXPECT_EQ(random_.uuid_, headers.get_(Headers::get().RequestId)); } @@ -1412,6 +1519,8 @@ TEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdNoEdgeRequest) { // with request id { TestRequestHeaderMapImpl headers{{"x-request-id", "my-request-id"}}; + EXPECT_CALL(*request_id_extension_, set(testing::Ref(headers), false)).Times(1); + EXPECT_CALL(*request_id_extension_, set(_, true)).Times(0); callMutateRequestHeaders(headers, Protocol::Http2); EXPECT_EQ("my-request-id", 
headers.get_(Headers::get().RequestId)); } diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index d6b9d7a05a72..bfd7507e0558 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -103,7 +103,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) // Randomly (using fuzzer data) set the header_field to either be of type Reference or Inline const auto& str = lower_case_strings.back(); Http::HeaderString header_field; // By default it's Inline - if ((str->get().size() > 0) && (str->get().at(0) & 0x1)) { + if ((!str->get().empty()) && (str->get().at(0) & 0x1)) { // Keeping header_field as Inline header_field.setCopy(str->get()); // inlineTransform can only be applied to Inline type! diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index d9039a7301e3..55d2056aab78 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -455,13 +455,13 @@ TEST(HeaderIsValidTest, InvalidHeaderValuesAreRejected) { continue; } - EXPECT_FALSE(HeaderUtility::headerIsValid(std::string(1, i))); + EXPECT_FALSE(HeaderUtility::headerValueIsValid(std::string(1, i))); } } TEST(HeaderIsValidTest, ValidHeaderValuesAreAccepted) { - EXPECT_TRUE(HeaderUtility::headerIsValid("some-value")); - EXPECT_TRUE(HeaderUtility::headerIsValid("Some Other Value")); + EXPECT_TRUE(HeaderUtility::headerValueIsValid("some-value")); + EXPECT_TRUE(HeaderUtility::headerValueIsValid("Some Other Value")); } TEST(HeaderIsValidTest, AuthorityIsValid) { @@ -485,5 +485,13 @@ TEST(HeaderAddTest, HeaderAdd) { &headers); } +TEST(HeaderIsValidTest, HeaderNameContainsUnderscore) { + EXPECT_FALSE(HeaderUtility::headerNameContainsUnderscore("cookie")); + EXPECT_FALSE(HeaderUtility::headerNameContainsUnderscore("x-something")); + EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("_cookie")); + 
EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("cookie_")); + EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("x_something")); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 64e734e4620d..76ef5380d85a 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -27,6 +27,7 @@ envoy_cc_test( "//source/common/http:exception_lib", "//source/common/http:header_map_lib", "//source/common/http/http1:codec_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", "//test/mocks/init:init_mocks", diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index cf3d3943b22f..0c26f621f9e8 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -11,6 +11,7 @@ #include "common/http/http1/codec_impl.h" #include "common/runtime/runtime_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" @@ -18,14 +19,13 @@ #include "test/test_common/printers.h" #include "test/test_common/test_runtime.h" +#include "absl/strings/string_view.h" #include "gmock/gmock.h" #include "gtest/gtest.h" using testing::_; -using testing::AtLeast; using testing::InSequence; using testing::Invoke; -using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Return; using testing::ReturnRef; @@ -43,14 +43,22 @@ std::string createHeaderFragment(int num_headers) { } return headers; } + +Buffer::OwnedImpl createBufferWithOneByteSlices(absl::string_view input) { + Buffer::OwnedImpl buffer; + for (const char& c : input) { + buffer.appendSliceForTest(&c, 1); + } + return buffer; +} } // namespace class Http1ServerConnectionImplTest : public testing::Test { public: void initialize() { - codec_ = - std::make_unique(connection_, 
store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); } NiceMock connection_; @@ -89,7 +97,9 @@ class Http1ServerConnectionImplTest : public testing::Test { protected: uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; - Stats::IsolatedStoreImpl store_; + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW}; + Stats::TestUtil::TestStore store_; }; void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_url, @@ -102,9 +112,9 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } MockRequestDecoder decoder; @@ -131,14 +141,14 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } MockRequestDecoder decoder; EXPECT_CALL(callbacks_, 
newStream(_, _)).WillOnce(ReturnRef(decoder)); - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); @@ -151,27 +161,31 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { // Make a new 'codec' with the right settings if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } - MockRequestDecoder decoder; + InSequence sequence; + StrictMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)) .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(_, false)); + Buffer::OwnedImpl expected_data("Hello World"); if (enable_trailers) { - EXPECT_CALL(decoder, decodeData(_, false)).Times(AtLeast(1)); + // Verify that body data is delivered before trailers. 
+ EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); EXPECT_CALL(decoder, decodeTrailers_); } else { - EXPECT_CALL(decoder, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); EXPECT_CALL(decoder, decodeData(_, true)); } - - Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello " - "World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); + Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "5\r\nWorld\r\n" + "0\r\nhello: world\r\nsecond: header\r\n\r\n"); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } @@ -181,9 +195,9 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ initialize(); // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; - codec_ = - std::make_unique(connection_, store_, callbacks_, codec_settings_, - max_request_headers_kb_, max_request_headers_count_); + codec_ = std::make_unique( + connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); std::string exception_reason; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -191,9 +205,9 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ if (enable_trailers) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); - EXPECT_CALL(decoder, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(decoder, decodeData(_, false)); } else { - EXPECT_CALL(decoder, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(decoder, decodeData(_, false)); EXPECT_CALL(decoder, decodeData(_, true)); } @@ -266,14 +280,43 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { {":path", "/"}, {":method", "GET"}, }; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + EXPECT_CALL(decoder, 
decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nTest:\r\nHello: World\r\n\r\n"); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, IdentityEncoding) { +// We support the identity encoding, but because it does not end in chunked encoding we reject it +// per RFC 7230 Section 3.3.3 +TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { + initialize(); + + InSequence sequence; + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: identity\r\n\r\n"); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: unsupported transfer encoding"); +} + +TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { + initialize(); + + InSequence sequence; + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: gzip\r\n\r\n"); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: unsupported transfer encoding"); +} + +// Verify that data in the two body chunks is merged before the call to decodeData. 
+TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { initialize(); InSequence sequence; @@ -283,16 +326,27 @@ TEST_F(Http1ServerConnectionImplTest, IdentityEncoding) { TestHeaderMapImpl expected_headers{ {":path", "/"}, - {":method", "GET"}, - {"transfer-encoding", "identity"}, + {":method", "POST"}, + {"transfer-encoding", "chunked"}, }; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); - Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: identity\r\n\r\n"); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + Buffer::OwnedImpl expected_data("Hello World"); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); + // Call to decodeData("", true) happens after. + Buffer::OwnedImpl empty(""); + EXPECT_CALL(decoder, decodeData(BufferEqual(&empty), true)); + + Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "5\r\nWorld\r\n" + "0\r\n\r\n"); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { +// Verify dispatch behavior when dispatching an incomplete chunk, and resumption of the parse via a +// second dispatch. 
+TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { initialize(); InSequence sequence; @@ -305,13 +359,52 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { {":method", "POST"}, {"transfer-encoding", "chunked"}, }; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)).Times(1); - Buffer::OwnedImpl expected_data("Hello World"); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); - EXPECT_CALL(decoder, decodeData(_, true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + Buffer::OwnedImpl expected_data1("Hello Worl"); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)); - Buffer::OwnedImpl buffer( - "POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); + Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "5\r\nWorl"); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + + // Process the rest of the body and final chunk. + Buffer::OwnedImpl expected_data2("d"); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)); + EXPECT_CALL(decoder, decodeData(_, true)); + + Buffer::OwnedImpl buffer2("d\r\n" + "0\r\n\r\n"); + codec_->dispatch(buffer2); + EXPECT_EQ(0U, buffer2.length()); +} + +// Verify that headers and chunked body are processed correctly and data is merged before the +// decodeData call even if delivered in a buffer that holds 1 byte per slice. 
+TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { + initialize(); + + InSequence sequence; + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{ + {":path", "/"}, + {":method", "POST"}, + {"transfer-encoding", "chunked"}, + }; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + Buffer::OwnedImpl expected_data("Hello World"); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); + EXPECT_CALL(decoder, decodeData(_, true)); + + Buffer::OwnedImpl buffer = + createBufferWithOneByteSlices("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "5\r\nWorld\r\n" + "0\r\n\r\n"); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } @@ -329,10 +422,10 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { {":method", "POST"}, {"transfer-encoding", "Chunked"}, }; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data("Hello World"); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); - EXPECT_CALL(decoder, decodeData(_, true)).Times(1); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); + EXPECT_CALL(decoder, decodeData(_, true)); Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); @@ -340,7 +433,32 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { EXPECT_EQ(0U, buffer.length()); } -// Currently http_parser does not support chained transfer encodings. +// Verify that body dispatch does not happen after detecting a parse error processing a chunk +// header. 
+TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { + initialize(); + + InSequence sequence; + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{ + {":path", "/"}, + {":method", "POST"}, + {"transfer-encoding", "chunked"}, + }; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + EXPECT_CALL(decoder, decodeData(_, _)).Times(0); + + Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "invalid\r\nWorl"); + + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, + "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); +} + TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { initialize(); @@ -381,7 +499,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.0\r\n\r\n"); codec_->dispatch(buffer); @@ -421,7 +539,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { return decoder; })); - EXPECT_CALL(decoder, decodeHeaders_(_, true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(_, true)); codec_->dispatch(buffer); std::string output; @@ -444,7 +562,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { response_encoder = &encoder; return decoder; })); - EXPECT_CALL(decoder, decodeHeaders_(_, true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(_, true)); codec_->dispatch(buffer); EXPECT_EQ(Protocol::Http11, codec_->protocol()); } @@ -506,7 +624,10 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { 
return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(_, false)); - EXPECT_CALL(decoder, decodeData(_, false)).Times(AtLeast(1)); + // Verify that body is delivered as soon as the final chunk marker is found, even if an error is + // found while processing trailers. + Buffer::OwnedImpl expected_data("body"); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\n" "Host: host\r\n" @@ -576,7 +697,7 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); codec_->dispatch(buffer); @@ -649,7 +770,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { // In most tests the write output is serialized to a buffer here it is // ignored to build up queued "end connection" sentinels. EXPECT_CALL(connection_, write(_, _)) - .Times(1) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> void { // Move the response out of data while preserving the buffer fragment sentinels. local_buffer.move(data); @@ -659,7 +780,8 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { response_encoder->encodeHeaders(headers, true); } - // Trying to shove a third response in the queue should trigger flood protection. + // Trying to accept a third request with two buffered responses in the queue should trigger flood + // protection. 
{ Http::ResponseEncoder* response_encoder = nullptr; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -669,10 +791,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); - - TestResponseHeaderMapImpl headers{{":status", "200"}}; - EXPECT_THROW_WITH_MESSAGE(response_encoder->encodeHeaders(headers, true), FrameFloodException, + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), FrameFloodException, "Too many responses queued."); EXPECT_EQ(1, store_.counter("http1.response_flood").value()); } @@ -702,7 +821,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { // In most tests the write output is serialized to a buffer here it is // ignored to build up queued "end connection" sentinels. EXPECT_CALL(connection_, write(_, _)) - .Times(1) + .WillOnce(Invoke([&](Buffer::Instance& data, bool) -> void { // Move the response out of data while preserving the buffer fragment sentinels. local_buffer.move(data); @@ -722,7 +841,7 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); TestHeaderMapImpl expected_headers{{":authority", "hello"}, {":path", "/"}, {":method", "GET"}}; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nHOST: hello\r\n\r\n"); codec_->dispatch(buffer); @@ -773,6 +892,72 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); } +// Ensures that request headers with names containing the underscore character are allowed +// when the option is set to allow. 
+TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + initialize(); + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{ + {":authority", "h.com"}, + {":path", "/"}, + {":method", "GET"}, + {"foo_bar", "bar"}, + }; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + + Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + EXPECT_EQ(0, store_.counter("http1.dropped_headers_with_underscores").value()); +} + +// Ensures that request headers with names containing the underscore character are dropped +// when the option is set to drop headers. +TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; + initialize(); + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{ + {":authority", "h.com"}, + {":path", "/"}, + {":method", "GET"}, + }; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)).Times(1); + + Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + EXPECT_EQ(1, store_.counter("http1.dropped_headers_with_underscores").value()); +} + +// Ensures that request with header names containing the underscore character are rejected +// when the option is set to reject request. 
+TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; + initialize(); + + MockRequestDecoder decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, + "http/1.1 protocol error: header name contains underscores"); + EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); + EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); +} + TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { TestScopedRuntime scoped_runtime; @@ -808,7 +993,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: header value contains NUL"); + "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); } // Mutate an HTTP GET with embedded NULs, this should always be rejected in some @@ -886,19 +1071,44 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); TestHeaderMapImpl expected_headers{{"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; - EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data1("12345"); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), 
false)).Times(1); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)); Buffer::OwnedImpl expected_data2; - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)).Times(1); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } +// Verify that headers and body with content length are processed correctly and data is merged +// before the decodeData call even if delivered in a buffer that holds 1 byte per slice. +TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { + initialize(); + + InSequence sequence; + + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + TestHeaderMapImpl expected_headers{{"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + + Buffer::OwnedImpl expected_data1("12345"); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)); + + Buffer::OwnedImpl expected_data2; + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)); + + Buffer::OwnedImpl buffer = + createBufferWithOneByteSlices("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); +} + TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { initialize(); @@ -1069,7 +1279,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { ON_CALL(connection_, write(_, _)).WillByDefault(Invoke([&output](Buffer::Instance& data, bool) { // Verify that individual writes into the codec's output buffer were coalesced into a single // slice - ASSERT_EQ(1, data.getRawSlices(nullptr, 0)); + ASSERT_EQ(1, data.getRawSlices().size()); output.append(data.toString()); data.drain(data.length()); })); @@ -1262,19 +1472,19 @@ TEST_F(Http1ServerConnectionImplTest, 
UpgradeRequest) { NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - EXPECT_CALL(decoder, decodeHeaders_(_, false)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length:5\r\n\r\n"); codec_->dispatch(buffer); Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)).Times(1); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)); codec_->dispatch(body); Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)).Times(1); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)); codec_->dispatch(websocket_payload); } @@ -1286,8 +1496,8 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl expected_data("12345abcd"); - EXPECT_CALL(decoder, decodeHeaders_(_, false)).Times(1); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(_, false)); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: " "foo\r\ncontent-length:5\r\n\r\n12345abcd"); codec_->dispatch(buffer); @@ -1303,8 +1513,8 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { // Even with T-E chunked, the data should neither be inspected for (the not // present in this unit test) chunks, but simply passed through. 
Buffer::OwnedImpl expected_data("12345abcd"); - EXPECT_CALL(decoder, decodeHeaders_(_, false)).Times(1); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(_, false)); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: " "foo\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); codec_->dispatch(buffer); @@ -1320,15 +1530,15 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { // Make sure we avoid the deferred_end_stream_headers_ optimization for // requests-with-no-body. Buffer::OwnedImpl expected_data("abcd"); - EXPECT_CALL(decoder, decodeHeaders_(_, false)).Times(1); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); + EXPECT_CALL(decoder, decodeHeaders_(_, false)); + EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer( "GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length: 0\r\n\r\nabcd"); codec_->dispatch(buffer); } TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { - EXPECT_CALL(connection_, bufferLimit()).Times(1).WillOnce(Return(10)); + EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); NiceMock decoder; @@ -1374,7 +1584,7 @@ class Http1ClientConnectionImplTest : public testing::Test { std::unique_ptr codec_; protected: - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; }; @@ -1624,13 +1834,13 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { // Send body payload Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); - EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false)).Times(1); + EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false)); codec_->dispatch(body); // Send websocket payload 
Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); - EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false)).Times(1); + EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false)); codec_->dispatch(websocket_payload); } @@ -1649,14 +1859,14 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { // Send upgrade headers EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl expected_data("12345abcd"); - EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); + EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: " "upgrade\r\nUpgrade: websocket\r\n\r\n12345abcd"); codec_->dispatch(response); } TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { - EXPECT_CALL(connection_, bufferLimit()).Times(1).WillOnce(Return(10)); + EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); InSequence s; @@ -1721,6 +1931,37 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } +// Regression test for https://github.com/envoyproxy/envoy/issues/10655. Make sure we correctly +// handle going below low watermark when closing the connection during a completion callback. +TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { + initialize(); + + InSequence s; + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + Http::MockStreamCallbacks stream_callbacks; + request_encoder.getStream().addCallbacks(stream_callbacks); + + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + // Fake a call from the underlying Network::Connection and verify the stream is notified. 
+ EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()); + static_cast(codec_.get()) + ->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)) + .WillOnce(Invoke([&](ResponseHeaderMapPtr&, bool) { + // Fake a call for going below the low watermark. Make sure no stream callbacks get called. + EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(0); + static_cast(codec_.get()) + ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + })); + Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); + codec_->dispatch(response); +} + TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index acfb83fe7b5c..29e0f2c73ea8 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -24,6 +24,7 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//test/common/http:common_lib", "//test/common/http/http2:http2_frame", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/http:http_mocks", "//test/mocks/init:init_mocks", "//test/mocks/local_info:local_info_mocks", diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 05653042c91b..69c3e47db3cc 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -10,6 +10,7 @@ #include "test/common/http/common.h" #include "test/common/http/http2/http2_frame.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/http/mocks.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" @@ -17,6 +18,7 @@ #include "test/mocks/protobuf/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include 
"test/test_common/utility.h" #include "codec_impl_test_util.h" @@ -77,7 +79,7 @@ class Http2CodecImplTestFixture { max_request_headers_kb_, max_response_headers_count_); server_ = std::make_unique( server_connection_, server_callbacks_, stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_); + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -157,7 +159,7 @@ class Http2CodecImplTestFixture { const Http2SettingsTuple server_settings_; bool allow_metadata_ = false; bool stream_error_on_invalid_http_messaging_ = false; - Stats::IsolatedStoreImpl stats_store_; + Stats::TestUtil::TestStore stats_store_; envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; @@ -188,6 +190,8 @@ class Http2CodecImplTestFixture { CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; uint32_t max_inbound_window_update_frames_per_data_frame_sent_ = CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT; + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW}; }; class Http2CodecImplTest : public ::testing::TestWithParam, @@ -262,6 +266,7 @@ class Http2CodecImplTest : public ::testing::TestWithParamhttp1StreamEncoderOptions()); TestRequestHeaderMapImpl request_headers; HttpTestUtility::addDefaultHeaders(request_headers); @@ -993,7 +998,7 @@ TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { max_request_headers_kb_, max_response_headers_count_); server_ = std::make_unique( server_connection_, server_callbacks_, stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_); + max_request_headers_kb_, max_request_headers_count_, 
headers_with_underscores_action_); for (int i = 0; i < 101; ++i) { request_encoder_ = &client_->newStream(response_decoder_); @@ -1125,7 +1130,7 @@ class Http2CustomSettingsTestBase : public Http2CodecImplTestFixture { : Http2CodecImplTestFixture(client_settings, server_settings), validate_client_(validate_client) {} - virtual ~Http2CustomSettingsTestBase() = default; + ~Http2CustomSettingsTestBase() override = default; // Sets the custom settings parameters specified by |parameters| in the |options| proto. void setHttp2CustomSettingsParameters(envoy::config::core::v3::Http2ProtocolOptions& options, @@ -1242,6 +1247,51 @@ TEST_P(Http2CodecImplTest, LargeRequestHeadersAccepted) { request_encoder_->encodeHeaders(request_headers, false); } +// Tests request headers with name containing underscore are dropped when the option is set to drop +// header. +TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestRequestHeaderMapImpl expected_headers(request_headers); + request_headers.addCopy("bad_header", "something"); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(1, stats_store_.counter("http2.dropped_headers_with_underscores").value()); +} + +// Tests that request with header names containing underscore are rejected when the option is set to +// reject request. 
+TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.addCopy("bad_header", "something"); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(1, stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); +} + +// Tests request headers with name containing underscore are allowed when the option is set to +// allow. +TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.addCopy("bad_header", "something"); + TestRequestHeaderMapImpl expected_headers(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(0, stats_store_.counter("http2.dropped_headers_with_underscores").value()); +} + // This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. // Large method headers should not trigger ASSERTs or ASAN. 
The underlying issue // in CVE-2019-18801 only affected the HTTP/1 encoder, but we include a test diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 80e3c2d096b1..c6d859056d8a 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -38,12 +38,14 @@ class TestCodecSettingsProvider { class TestServerConnectionImpl : public ServerConnectionImpl, public TestCodecSettingsProvider { public: - TestServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, - Stats::Scope& scope, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - uint32_t max_request_headers_kb, uint32_t max_request_headers_count) + TestServerConnectionImpl( + Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : ServerConnectionImpl(connection, callbacks, scope, http2_options, max_request_headers_kb, - max_request_headers_count) {} + max_request_headers_count, headers_with_underscores_action) {} nghttp2_session* session() { return session_; } using ServerConnectionImpl::getStream; diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index 2dcedab7c581..c55a00f21d63 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -58,7 +58,8 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + 
Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); @@ -134,7 +135,8 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); @@ -197,7 +199,8 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); @@ -265,7 +268,8 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { ServerCodecFrameInjector codec; TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, 
Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index 35bce06813b3..9ac05cbfbe94 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -7,8 +7,6 @@ #include "test/common/http/http2/frame_replay.h" #include "test/fuzz/fuzz_runner.h" -using testing::AnyNumber; - namespace Envoy { namespace Http { namespace Http2 { @@ -18,7 +16,8 @@ void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { // Create the server connection containing the nghttp2 session. TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); codec.write(WellKnownFrames::defaultSettingsFrame(), connection); codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index 09f0336ec8d3..b670b49f8795 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -8,8 +8,6 @@ #include "test/common/http/http2/frame_replay.h" #include "test/fuzz/fuzz_runner.h" -using testing::AnyNumber; - namespace Envoy { namespace Http { namespace Http2 { diff --git a/test/common/http/request_id_extension_uuid_impl_test.cc 
b/test/common/http/request_id_extension_uuid_impl_test.cc new file mode 100644 index 000000000000..fb3da43f9786 --- /dev/null +++ b/test/common/http/request_id_extension_uuid_impl_test.cc @@ -0,0 +1,182 @@ +#include + +#include "common/http/request_id_extension_uuid_impl.h" +#include "common/runtime/runtime_impl.h" + +#include "test/mocks/runtime/mocks.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Http { + +TEST(UUIDRequestIDExtensionTest, SetRequestID) { + testing::StrictMock random; + UUIDRequestIDExtension uuid_utils(random); + TestRequestHeaderMapImpl request_headers; + + EXPECT_CALL(random, uuid()).Times(1).WillOnce(Return("first-request-id")); + uuid_utils.set(request_headers, true); + EXPECT_EQ("first-request-id", request_headers.get_(Headers::get().RequestId)); + + EXPECT_CALL(random, uuid()).Times(1).WillOnce(Return("second-request-id")); + uuid_utils.set(request_headers, true); + EXPECT_EQ("second-request-id", request_headers.get_(Headers::get().RequestId)); +} + +TEST(UUIDRequestIDExtensionTest, EnsureRequestID) { + testing::StrictMock random; + UUIDRequestIDExtension uuid_utils(random); + TestRequestHeaderMapImpl request_headers; + + EXPECT_CALL(random, uuid()).Times(1).WillOnce(Return("first-request-id")); + uuid_utils.set(request_headers, false); + EXPECT_EQ("first-request-id", request_headers.get_(Headers::get().RequestId)); + + EXPECT_CALL(random, uuid()).Times(0); + uuid_utils.set(request_headers, false); + EXPECT_EQ("first-request-id", request_headers.get_(Headers::get().RequestId)); +} + +TEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) { + testing::StrictMock random; + UUIDRequestIDExtension uuid_utils(random); + TestRequestHeaderMapImpl request_headers; + TestResponseHeaderMapImpl response_headers; + + uuid_utils.setInResponse(response_headers, request_headers); + EXPECT_EQ(nullptr, response_headers.get(Headers::get().RequestId)); + + 
request_headers.setRequestId("some-request-id"); + uuid_utils.setInResponse(response_headers, request_headers); + EXPECT_EQ("some-request-id", response_headers.get_(Headers::get().RequestId)); + + request_headers.removeRequestId(); + response_headers.setRequestId("another-request-id"); + uuid_utils.setInResponse(response_headers, request_headers); + EXPECT_EQ("another-request-id", response_headers.get_(Headers::get().RequestId)); + + request_headers.setRequestId(""); + uuid_utils.setInResponse(response_headers, request_headers); + EXPECT_EQ("", response_headers.get_(Headers::get().RequestId)); +} + +TEST(UUIDRequestIDExtensionTest, ModRequestIDBy) { + Runtime::RandomGeneratorImpl random; + UUIDRequestIDExtension uuid_utils(random); + TestRequestHeaderMapImpl request_headers; + + uint64_t result; + EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 10000)); + + request_headers.setRequestId("fffffff"); + EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 10000)); + + request_headers.setRequestId("fffffffz-0012-0110-00ff-0c00400600ff"); + EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 10000)); + + request_headers.setRequestId("00000000-0000-0000-0000-000000000000"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100)); + EXPECT_EQ(0, result); + + request_headers.setRequestId("00000001-0000-0000-0000-000000000000"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100)); + EXPECT_EQ(1, result); + + request_headers.setRequestId("0000000f-0000-0000-0000-00000000000a"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100)); + EXPECT_EQ(15, result); + + request_headers.setRequestId(""); + EXPECT_FALSE(uuid_utils.modBy(request_headers, result, 100)); + + request_headers.setRequestId("000000ff-0000-0000-0000-000000000000"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100)); + EXPECT_EQ(55, result); + + request_headers.setRequestId("000000ff-0000-0000-0000-000000000000"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, 
result, 10000)); + EXPECT_EQ(255, result); + + request_headers.setRequestId("a0090100-0012-0110-00ff-0c00400600ff"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 137)); + EXPECT_EQ(8, result); + + request_headers.setRequestId("ffffffff-0012-0110-00ff-0c00400600ff"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 100)); + EXPECT_EQ(95, result); + + request_headers.setRequestId("ffffffff-0012-0110-00ff-0c00400600ff"); + EXPECT_TRUE(uuid_utils.modBy(request_headers, result, 10000)); + EXPECT_EQ(7295, result); +} + +TEST(UUIDRequestIDExtensionTest, RequestIDModDistribution) { + Runtime::RandomGeneratorImpl random; + UUIDRequestIDExtension uuid_utils(random); + TestRequestHeaderMapImpl request_headers; + + const int mod = 100; + const int required_percentage = 11; + int total_samples = 0; + int interesting_samples = 0; + + for (int i = 0; i < 500000; ++i) { + std::string uuid = random.uuid(); + + const char c = uuid[19]; + ASSERT_TRUE(uuid[14] == '4'); // UUID version 4 (random) + ASSERT_TRUE(c == '8' || c == '9' || c == 'a' || c == 'b'); // UUID variant 1 (RFC4122) + + uint64_t value; + request_headers.setRequestId(uuid); + ASSERT_TRUE(uuid_utils.modBy(request_headers, value, mod)); + + if (value < required_percentage) { + interesting_samples++; + } + total_samples++; + } + + EXPECT_NEAR(required_percentage / 100.0, interesting_samples * 1.0 / total_samples, 0.002); +} + +TEST(UUIDRequestIDExtensionTest, DISABLED_benchmark) { + Runtime::RandomGeneratorImpl random; + + for (int i = 0; i < 100000000; ++i) { + random.uuid(); + } +} + +TEST(UUIDRequestIDExtensionTest, SetTraceStatus) { + Runtime::RandomGeneratorImpl random; + UUIDRequestIDExtension uuid_utils(random); + TestRequestHeaderMapImpl request_headers; + request_headers.setRequestId(random.uuid()); + + EXPECT_EQ(TraceStatus::NoTrace, uuid_utils.getTraceStatus(request_headers)); + + uuid_utils.setTraceStatus(request_headers, TraceStatus::Sampled); + EXPECT_EQ(TraceStatus::Sampled, 
uuid_utils.getTraceStatus(request_headers)); + + uuid_utils.setTraceStatus(request_headers, TraceStatus::Client); + EXPECT_EQ(TraceStatus::Client, uuid_utils.getTraceStatus(request_headers)); + + uuid_utils.setTraceStatus(request_headers, TraceStatus::Forced); + EXPECT_EQ(TraceStatus::Forced, uuid_utils.getTraceStatus(request_headers)); + + uuid_utils.setTraceStatus(request_headers, TraceStatus::NoTrace); + EXPECT_EQ(TraceStatus::NoTrace, uuid_utils.getTraceStatus(request_headers)); + + // Invalid request ID. + request_headers.setRequestId(""); + uuid_utils.setTraceStatus(request_headers, TraceStatus::Forced); + EXPECT_EQ(request_headers.RequestId()->value().getStringView(), ""); +} + +} // namespace Http +} // namespace Envoy diff --git a/test/common/memory/BUILD b/test/common/memory/BUILD index c19ef6981a12..55aa793bcee1 100644 --- a/test/common/memory/BUILD +++ b/test/common/memory/BUILD @@ -21,7 +21,7 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/memory:heap_shrinker_lib", "//source/common/memory:stats_lib", - "//source/common/stats:isolated_store_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/mocks/server:server_mocks", "//test/test_common:simulated_time_system_lib", diff --git a/test/common/memory/heap_shrinker_test.cc b/test/common/memory/heap_shrinker_test.cc index 5ae0add6bb2d..63da4d0247c8 100644 --- a/test/common/memory/heap_shrinker_test.cc +++ b/test/common/memory/heap_shrinker_test.cc @@ -2,6 +2,7 @@ #include "common/memory/heap_shrinker.h" #include "common/memory/stats.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -27,7 +28,7 @@ class HeapShrinkerTest : public testing::Test { dispatcher_.run(Event::Dispatcher::RunType::NonBlock); } - Envoy::Stats::IsolatedStoreImpl stats_; + Envoy::Stats::TestUtil::TestStore stats_; Event::SimulatedTimeSystem 
time_system_; Api::ApiPtr api_; Event::DispatcherImpl dispatcher_; diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 6996151729c5..5c56a6262709 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -110,6 +110,7 @@ envoy_cc_test( "//source/common/network:filter_lib", "//source/common/network:listen_socket_lib", "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", "//test/mocks/network:network_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", @@ -319,3 +320,13 @@ envoy_cc_test( "//source/common/stream_info:filter_state_lib", ], ) + +envoy_cc_test( + name = "filter_matcher_test", + srcs = ["filter_matcher_test.cc"], + deps = [ + "//source/common/network:address_lib", + "//source/common/network:filter_matcher_lib", + "//test/mocks/network:network_mocks", + ], +) diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 22f4d014111e..8d9fe29f4abf 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -85,16 +85,17 @@ TEST_P(ConnectionImplDeathTest, BadFd) { Api::ApiPtr api = Api::createApiForTest(); Event::DispatcherPtr dispatcher(api->allocateDispatcher()); IoHandlePtr io_handle = std::make_unique(); + StreamInfo::StreamInfoImpl stream_info(dispatcher->timeSource()); EXPECT_DEATH_LOG_TO_STDERR( ConnectionImpl(*dispatcher, std::make_unique(std::move(io_handle), nullptr, nullptr), - Network::Test::createRawBufferSocket(), false), + Network::Test::createRawBufferSocket(), stream_info, false), ".*assert failure: SOCKET_VALID\\(ioHandle\\(\\)\\.fd\\(\\)\\).*"); } class ConnectionImplTest : public testing::TestWithParam { protected: - ConnectionImplTest() : api_(Api::createApiForTest(time_system_)) {} + ConnectionImplTest() : api_(Api::createApiForTest(time_system_)), stream_info_(time_system_) {} void setUpBasicConnection() { if (dispatcher_.get() == nullptr) 
{ @@ -116,11 +117,11 @@ class ConnectionImplTest : public testing::TestWithParam { void connect() { int expected_callbacks = 2; client_connection_->connect(); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); @@ -228,6 +229,7 @@ class ConnectionImplTest : public testing::TestWithParam { MockWatermarkBuffer* client_write_buffer_ = nullptr; Address::InstanceConstSharedPtr source_address_; Socket::OptionsSharedPtr socket_options_; + StreamInfo::StreamInfoImpl stream_info_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionImplTest, @@ -256,12 +258,12 @@ TEST_P(ConnectionImplTest, CloseDuringConnectCallback) { })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); })); @@ -281,9 +283,9 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), nullptr, true); if (socket_->localAddress()->ip()->version() == Address::IpVersion::v4) { - broadcast_address.reset(new Address::Ipv4Instance("224.0.0.1", 0)); + broadcast_address = std::make_shared("224.0.0.1", 
0); } else { - broadcast_address.reset(new Address::Ipv6Instance("ff02::1", 0)); + broadcast_address = std::make_shared("ff02::1", 0); } client_connection_ = dispatcher_->createClientConnection( @@ -313,7 +315,7 @@ TEST_P(ConnectionImplTest, SocketOptions) { })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); auto option = std::make_shared(); @@ -323,7 +325,7 @@ TEST_P(ConnectionImplTest, SocketOptions) { .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { socket->addOption(option); server_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); @@ -362,7 +364,7 @@ TEST_P(ConnectionImplTest, SocketOptionsFailureTest) { })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); auto option = std::make_shared(); @@ -372,7 +374,7 @@ TEST_P(ConnectionImplTest, SocketOptionsFailureTest) { .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { socket->addOption(option); server_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); @@ -442,12 +444,12 @@ TEST_P(ConnectionImplTest, ConnectionStats) { EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)).InSequence(s1); EXPECT_CALL(client_connection_stats.tx_total_, add(4)).InSequence(s1); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); MockConnectionStats server_connection_stats; 
EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->setConnectionStats(server_connection_stats.toBufferStats()); server_connection_->addReadFilter(read_filter_); @@ -1273,7 +1275,7 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) { IoHandlePtr io_handle = std::make_unique(0); std::unique_ptr server_connection(new Network::ConnectionImpl( dispatcher, std::make_unique(std::move(io_handle), nullptr, nullptr), - std::make_unique>(), true)); + std::make_unique>(), stream_info_, true)); time_system_.setMonotonicTime(std::chrono::milliseconds(0)); @@ -1305,7 +1307,7 @@ TEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) auto server_connection = std::make_unique( *mocks.dispatcher_, std::make_unique(std::move(io_handle), nullptr, nullptr), - std::move(mocks.transport_socket_), true); + std::move(mocks.transport_socket_), stream_info_, true); InSequence s1; // The actual timeout is insignificant, we just need to enable delayed close processing by @@ -1357,7 +1359,7 @@ TEST_P(ConnectionImplTest, DelayedCloseTimeoutDisableOnSocketClose) { auto server_connection = std::make_unique( *mocks.dispatcher_, std::make_unique(std::move(io_handle), nullptr, nullptr), - std::move(mocks.transport_socket_), true); + std::move(mocks.transport_socket_), stream_info_, true); InSequence s1; @@ -1393,7 +1395,7 @@ TEST_P(ConnectionImplTest, DelayedCloseTimeoutNullStats) { auto server_connection = std::make_unique( *mocks.dispatcher_, std::make_unique(std::move(io_handle), nullptr, nullptr), - std::move(mocks.transport_socket_), true); + std::move(mocks.transport_socket_), stream_info_, true); InSequence s1; @@ 
-1452,7 +1454,7 @@ class FakeReadFilter : public Network::ReadFilter { class MockTransportConnectionImplTest : public testing::Test { public: - MockTransportConnectionImplTest() { + MockTransportConnectionImplTest() : stream_info_(dispatcher_.timeSource()) { EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _)) .WillRepeatedly(Invoke([](std::function below_low, std::function above_high) -> Buffer::Instance* { @@ -1470,7 +1472,7 @@ class MockTransportConnectionImplTest : public testing::Test { IoHandlePtr io_handle = std::make_unique(0); connection_ = std::make_unique( dispatcher_, std::make_unique(std::move(io_handle), nullptr, nullptr), - TransportSocketPtr(transport_socket_), true); + TransportSocketPtr(transport_socket_), stream_info_, true); connection_->addConnectionCallbacks(callbacks_); } @@ -1491,6 +1493,7 @@ class MockTransportConnectionImplTest : public testing::Test { Event::MockFileEvent* file_event_; Event::FileReadyCb file_ready_cb_; TransportSocketCallbacks* transport_socket_callbacks_; + StreamInfo::StreamInfoImpl stream_info_; }; // The purpose of this case is to verify the destructor order of the object. 
@@ -2000,11 +2003,11 @@ class ReadBufferLimitTest : public ConnectionImplTest { client_connection_->addConnectionCallbacks(client_callbacks_); client_connection_->connect(); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); server_connection_->setBufferLimits(read_buffer_limit); server_connection_->addReadFilter(read_filter_); EXPECT_EQ("", server_connection_->nextProtocol()); diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 10338eedc83c..e759e862304d 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -21,6 +21,7 @@ #include "common/network/filter_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/utility.h" +#include "common/stream_info/stream_info_impl.h" #include "test/mocks/network/mocks.h" #include "test/test_common/environment.h" @@ -265,11 +266,12 @@ class TestDnsServerQuery { class TestDnsServer : public ListenerCallbacks { public: - TestDnsServer(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher), record_ttl_(0) {} + TestDnsServer(Event::Dispatcher& dispatcher) + : dispatcher_(dispatcher), record_ttl_(0), stream_info_(dispatcher.timeSource()) {} void onAccept(ConnectionSocketPtr&& socket) override { Network::ConnectionPtr new_connection = dispatcher_.createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); TestDnsServerQuery* query = new TestDnsServerQuery(std::move(new_connection), hosts_a_, hosts_aaaa_, cnames_, record_ttl_, refused_); queries_.emplace_back(query); @@ -301,6 +303,7 @@ class 
TestDnsServer : public ListenerCallbacks { // All queries are tracked so we can do resource reclamation when the test is // over. std::vector> queries_; + StreamInfo::StreamInfoImpl stream_info_; }; } // namespace @@ -479,13 +482,13 @@ class DnsImplTest : public testing::TestWithParam { EXPECT_EQ(expected_results, address_as_string_list); } - for (auto expected_absent_result : expected_absent_results) { + for (const auto& expected_absent_result : expected_absent_results) { EXPECT_THAT(address_as_string_list, Not(Contains(expected_absent_result))); } if (expected_ttl) { std::list address_list = getAddressList(results); - for (auto address : results) { + for (const auto& address : results) { EXPECT_EQ(address.ttl_, expected_ttl.value()); } } diff --git a/test/common/network/filter_matcher_test.cc b/test/common/network/filter_matcher_test.cc new file mode 100644 index 000000000000..2668400adbc1 --- /dev/null +++ b/test/common/network/filter_matcher_test.cc @@ -0,0 +1,113 @@ +#include "common/network/address_impl.h" +#include "common/network/filter_matcher.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Network { +namespace { +struct CallbackHandle { + std::unique_ptr callback_; + std::unique_ptr socket_; + Address::InstanceConstSharedPtr address_; +}; +} // namespace +class ListenerFilterMatcherTest : public testing::Test { +public: + CallbackHandle createCallbackOnPort(int port) { + CallbackHandle handle; + handle.address_ = std::make_shared("127.0.0.1", port); + handle.socket_ = std::make_unique(); + handle.callback_ = std::make_unique(); + EXPECT_CALL(*handle.socket_, localAddress()).WillRepeatedly(ReturnRef(handle.address_)); + EXPECT_CALL(*handle.callback_, socket()).WillRepeatedly(ReturnRef(*handle.socket_)); + return handle; + } + envoy::config::listener::v3::ListenerFilterChainMatchPredicate createPortPredicate(int port_start, + int port_end) { + 
envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred; + auto ports = pred.mutable_destination_port_range(); + ports->set_start(port_start); + ports->set_end(port_end); + return pred; + } +}; + +TEST_F(ListenerFilterMatcherTest, DstPortMatcher) { + auto pred = createPortPredicate(80, 81); + auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred); + auto handle79 = createCallbackOnPort(79); + auto handle80 = createCallbackOnPort(80); + auto handle81 = createCallbackOnPort(81); + EXPECT_FALSE(matcher->matches(*handle79.callback_)); + EXPECT_TRUE(matcher->matches(*handle80.callback_)); + EXPECT_FALSE(matcher->matches(*handle81.callback_)); +} + +TEST_F(ListenerFilterMatcherTest, AnyMatdcher) { + envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred; + pred.set_any_match(true); + auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred); + auto handle79 = createCallbackOnPort(79); + auto handle80 = createCallbackOnPort(80); + auto handle81 = createCallbackOnPort(81); + EXPECT_TRUE(matcher->matches(*handle79.callback_)); + EXPECT_TRUE(matcher->matches(*handle80.callback_)); + EXPECT_TRUE(matcher->matches(*handle81.callback_)); +} + +TEST_F(ListenerFilterMatcherTest, NotMatcher) { + auto pred = createPortPredicate(80, 81); + envoy::config::listener::v3::ListenerFilterChainMatchPredicate not_pred; + not_pred.mutable_not_match()->MergeFrom(pred); + auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(not_pred); + auto handle79 = createCallbackOnPort(79); + auto handle80 = createCallbackOnPort(80); + auto handle81 = createCallbackOnPort(81); + EXPECT_TRUE(matcher->matches(*handle79.callback_)); + EXPECT_FALSE(matcher->matches(*handle80.callback_)); + EXPECT_TRUE(matcher->matches(*handle81.callback_)); +} + +TEST_F(ListenerFilterMatcherTest, OrMatcher) { + auto pred80 = createPortPredicate(80, 81); + auto pred443 = createPortPredicate(443, 444); + + 
envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred; + pred.mutable_or_match()->mutable_rules()->Add()->MergeFrom(pred80); + pred.mutable_or_match()->mutable_rules()->Add()->MergeFrom(pred443); + + auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred); + auto handle80 = createCallbackOnPort(80); + auto handle443 = createCallbackOnPort(443); + auto handle3306 = createCallbackOnPort(3306); + + EXPECT_FALSE(matcher->matches(*handle3306.callback_)); + EXPECT_TRUE(matcher->matches(*handle80.callback_)); + EXPECT_TRUE(matcher->matches(*handle443.callback_)); +} + +TEST_F(ListenerFilterMatcherTest, AndMatcher) { + auto pred80_3306 = createPortPredicate(80, 3306); + auto pred443_3306 = createPortPredicate(443, 3306); + + envoy::config::listener::v3::ListenerFilterChainMatchPredicate pred; + pred.mutable_and_match()->mutable_rules()->Add()->MergeFrom(pred80_3306); + pred.mutable_and_match()->mutable_rules()->Add()->MergeFrom(pred443_3306); + + auto matcher = ListenerFilterMatcherBuilder::buildListenerFilterMatcher(pred); + auto handle80 = createCallbackOnPort(80); + auto handle443 = createCallbackOnPort(443); + auto handle3306 = createCallbackOnPort(3306); + + EXPECT_FALSE(matcher->matches(*handle3306.callback_)); + EXPECT_FALSE(matcher->matches(*handle80.callback_)); + EXPECT_TRUE(matcher->matches(*handle443.callback_)); +} +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 0ca9b283bf8a..9f9d6bb115e4 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -39,10 +39,11 @@ static void errorCallbackTest(Address::IpVersion version) { Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); + StreamInfo::StreamInfoImpl stream_info(dispatcher->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) 
.WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { Network::ConnectionPtr conn = dispatcher->createServerConnection( - std::move(accepted_socket), Network::Test::createRawBufferSocket()); + std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info); client_connection->close(ConnectionCloseType::NoFlush); conn->close(ConnectionCloseType::NoFlush); socket->close(); @@ -122,11 +123,12 @@ TEST_P(ListenerImplTest, UseActualDst) { EXPECT_CALL(listener, getLocalAddress(_)).Times(0); + StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks2, onAccept_(_)).Times(0); EXPECT_CALL(listener_callbacks1, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { Network::ConnectionPtr conn = dispatcher_->createServerConnection( - std::move(accepted_socket), Network::Test::createRawBufferSocket()); + std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info); EXPECT_EQ(*conn->localAddress(), *socket->localAddress()); client_connection->close(ConnectionCloseType::NoFlush); conn->close(ConnectionCloseType::NoFlush); @@ -153,10 +155,11 @@ TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { EXPECT_CALL(listener, getLocalAddress(_)).WillOnce(Return(local_dst_address)); + StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::ConnectionPtr conn = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info); EXPECT_EQ(*conn->localAddress(), *local_dst_address); client_connection->close(ConnectionCloseType::NoFlush); conn->close(ConnectionCloseType::NoFlush); @@ -201,10 +204,11 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { return Address::addressFromFd(fd); })); + 
StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::ConnectionPtr conn = dispatcher_->createServerConnection( - std::move(socket), Network::Test::createRawBufferSocket()); + std::move(socket), Network::Test::createRawBufferSocket(), stream_info); EXPECT_EQ(conn->localAddress()->ip()->version(), conn->remoteAddress()->ip()->version()); EXPECT_EQ(conn->localAddress()->asString(), local_dst_address->asString()); EXPECT_EQ(*conn->localAddress(), *local_dst_address); diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 1bdd6790e1fc..96c8f235df60 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -81,9 +82,18 @@ class UdpListenerImplTest : public ListenerImplTestBase { client_.localAddress()->ip()->addressAsString()); EXPECT_EQ(*data.addresses_.local_, *send_to_addr_); - EXPECT_EQ(time_system_.monotonicTime(), data.receive_time_); + + size_t num_packet_per_recv = 1u; + if (Api::OsSysCallsSingleton::get().supportsMmsg()) { + num_packet_per_recv = 16u; + } + EXPECT_EQ(time_system_.monotonicTime(), + data.receive_time_ + + std::chrono::milliseconds( + (num_packets_received_by_listener_ % num_packet_per_recv) * 100)); // Advance time so that next onData() should have different received time. 
time_system_.sleep(std::chrono::milliseconds(100)); + ++num_packets_received_by_listener_; } SocketSharedPtr server_socket_; @@ -91,6 +101,7 @@ class UdpListenerImplTest : public ListenerImplTestBase { Address::InstanceConstSharedPtr send_to_addr_; MockUdpListenerCallbacks listener_callbacks_; std::unique_ptr listener_; + size_t num_packets_received_by_listener_{0}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplTest, @@ -158,11 +169,14 @@ TEST_P(UdpListenerImplTest, UseActualDstUdp) { * Tests UDP listener for read and write callbacks with actual data. */ TEST_P(UdpListenerImplTest, UdpEcho) { - // We send 2 packets and expect it to echo. - const std::string first("first"); - client_.write(first, *send_to_addr_); - const std::string second("second"); - client_.write(second, *send_to_addr_); + // We send 17 packets and expect it to echo. + absl::FixedArray client_data({"first", "second", "third", "forth", "fifth", "sixth", + "seventh", "eighth", "ninth", "tenth", "eleventh", + "twelveth", "thirteenth", "fourteenth", "fifteenth", + "sixteenth", "seventeenth"}); + for (const auto& i : client_data) { + client_.write(i, *send_to_addr_); + } // For unit test purposes, we assume that the data was received in order. 
Address::InstanceConstSharedPtr test_peer_address; @@ -177,15 +191,15 @@ TEST_P(UdpListenerImplTest, UdpEcho) { test_peer_address = data.addresses_.peer_; const std::string data_str = data.buffer_->toString(); - EXPECT_EQ(data_str, first); + EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); server_received_data.push_back(data_str); })) - .WillOnce(Invoke([&](const UdpRecvData& data) -> void { + .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void { validateRecvCallbackParams(data); const std::string data_str = data.buffer_->toString(); - EXPECT_EQ(data_str, second); + EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); server_received_data.push_back(data_str); })); @@ -301,6 +315,7 @@ TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { // Inject mocked OsSysCalls implementation to mock a read failure. Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, supportsMmsg()); EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, ENOTSUP})); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -317,30 +332,30 @@ TEST_P(UdpListenerImplTest, SendData) { Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); buffer->add(payload); // Use a self address that is unlikely to be picked by source address discovery - // algorithm if not specified in recvmsg. Port is not taken into + // algorithm if not specified in recvmsg/recvmmsg. Port is not taken into // consideration. Address::InstanceConstSharedPtr send_from_addr; if (version_ == Address::IpVersion::v4) { // Linux kernel regards any 127.x.x.x as local address. But Mac OS doesn't. 
- send_from_addr.reset(new Address::Ipv4Instance( + send_from_addr = std::make_shared( #ifndef __APPLE__ "127.1.2.3", #else "127.0.0.1", #endif - server_socket_->localAddress()->ip()->port())); + server_socket_->localAddress()->ip()->port()); } else { // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use // ::1 to avoid EINVAL error. Unfortunately this can't verify that sendmsg with // customized source address is doing the work because kernel also picks ::1 // if it's not specified in cmsghdr. - send_from_addr.reset(new Address::Ipv6Instance( + send_from_addr = std::make_shared( #ifdef IP_FREEBIND "::9", #else "::1", #endif - server_socket_->localAddress()->ip()->port())); + server_socket_->localAddress()->ip()->port()); } UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer}; diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index 05f768ede2f1..120f13615c82 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -1,5 +1,6 @@ #include #include +#include #include #include "envoy/common/exception.h" @@ -179,52 +180,52 @@ TEST(NetworkUtility, LocalConnection) { EXPECT_CALL(socket, localAddress()).WillRepeatedly(testing::ReturnRef(local_addr)); EXPECT_CALL(socket, remoteAddress()).WillRepeatedly(testing::ReturnRef(remote_addr)); - local_addr.reset(new Network::Address::Ipv4Instance("127.0.0.1")); - remote_addr.reset(new Network::Address::PipeInstance("/pipe/path")); + local_addr = std::make_shared("127.0.0.1"); + remote_addr = std::make_shared("/pipe/path"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::PipeInstance("/pipe/path")); - remote_addr.reset(new Network::Address::PipeInstance("/pipe/path")); + local_addr = std::make_shared("/pipe/path"); + remote_addr = std::make_shared("/pipe/path"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new 
Network::Address::Ipv4Instance("127.0.0.1")); - remote_addr.reset(new Network::Address::Ipv4Instance("127.0.0.1")); + local_addr = std::make_shared("127.0.0.1"); + remote_addr = std::make_shared("127.0.0.1"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::Ipv4Instance("127.0.0.2")); + local_addr = std::make_shared("127.0.0.2"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::Ipv4Instance("4.4.4.4")); - remote_addr.reset(new Network::Address::Ipv4Instance("8.8.8.8")); + local_addr = std::make_shared("4.4.4.4"); + remote_addr = std::make_shared("8.8.8.8"); EXPECT_FALSE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::Ipv4Instance("4.4.4.4")); - remote_addr.reset(new Network::Address::Ipv4Instance("4.4.4.4")); + local_addr = std::make_shared("4.4.4.4"); + remote_addr = std::make_shared("4.4.4.4"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::Ipv4Instance("4.4.4.4", 1234)); - remote_addr.reset(new Network::Address::Ipv4Instance("4.4.4.4", 4321)); + local_addr = std::make_shared("4.4.4.4", 1234); + remote_addr = std::make_shared("4.4.4.4", 4321); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::Ipv6Instance("::1")); - remote_addr.reset(new Network::Address::Ipv6Instance("::1")); + local_addr = std::make_shared("::1"); + remote_addr = std::make_shared("::1"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - local_addr.reset(new Network::Address::Ipv6Instance("::2")); - remote_addr.reset(new Network::Address::Ipv6Instance("::1")); + local_addr = std::make_shared("::2"); + remote_addr = std::make_shared("::1"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - remote_addr.reset(new Network::Address::Ipv6Instance("::3")); + remote_addr = std::make_shared("::3"); EXPECT_FALSE(Utility::isSameIpOrLoopback(socket)); - remote_addr.reset(new 
Network::Address::Ipv6Instance("::2")); + remote_addr = std::make_shared("::2"); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - remote_addr.reset(new Network::Address::Ipv6Instance("::2", 4321)); - local_addr.reset(new Network::Address::Ipv6Instance("::2", 1234)); + remote_addr = std::make_shared("::2", 4321); + local_addr = std::make_shared("::2", 1234); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - remote_addr.reset(new Network::Address::Ipv6Instance("fd00::")); + remote_addr = std::make_shared("fd00::"); EXPECT_FALSE(Utility::isSameIpOrLoopback(socket)); } diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index 66c0c85f0328..c6709f80e46f 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -14,6 +14,7 @@ envoy_cc_test( srcs = ["message_validator_impl_test.cc"], deps = [ "//source/common/protobuf:message_validator_lib", + "//test/common/stats:stat_test_utility_lib", "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], @@ -25,7 +26,7 @@ envoy_cc_test( deps = [ "//source/common/config:api_version_lib", "//source/common/protobuf:utility_lib", - "//source/common/stats:isolated_store_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/init:init_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", diff --git a/test/common/protobuf/message_validator_impl_test.cc b/test/common/protobuf/message_validator_impl_test.cc index cc58c6fd205b..fd5433704584 100644 --- a/test/common/protobuf/message_validator_impl_test.cc +++ b/test/common/protobuf/message_validator_impl_test.cc @@ -3,6 +3,7 @@ #include "common/protobuf/message_validator_impl.h" #include "common/stats/isolated_store_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" @@ -20,7 +21,7 @@ TEST(NullValidationVisitorImpl, UnknownField) { // The warning validation visitor logs and bumps stats on unknown fields 
TEST(WarningValidationVisitorImpl, UnknownField) { - Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; Stats::Counter& counter = stats.counter("counter"); WarningValidationVisitorImpl warning_validation_visitor; // First time around we should log. diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 45efbe1d3a42..13b4a40e09b3 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -16,8 +16,8 @@ #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_impl.h" -#include "common/stats/isolated_store_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" @@ -1392,7 +1392,7 @@ class DeprecatedFieldsTest : public testing::TestWithParam { const bool with_upgrade_; Event::MockDispatcher dispatcher_; NiceMock tls_; - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; Runtime::MockRandomGenerator generator_; Api::ApiPtr api_; Runtime::MockRandomGenerator rand_; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index d232c86b26c9..fd61d51ff961 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -6760,6 +6760,10 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { ProtobufTypes::MessagePtr createEmptyRouteConfigProto() override { return ProtobufTypes::MessagePtr{new ProtobufWkt::Timestamp()}; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Override this to guarantee that we have a different factory mapping by-type. 
+ return ProtobufTypes::MessagePtr{new ProtobufWkt::Timestamp()}; + } Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfig(const Protobuf::Message& message, Server::Configuration::ServerFactoryContext&, @@ -6778,7 +6782,7 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } ProtobufTypes::MessagePtr createEmptyRouteConfigProto() override { - return ProtobufTypes::MessagePtr{new ProtobufWkt::Timestamp()}; + return ProtobufTypes::MessagePtr{new ProtobufWkt::Struct()}; } }; @@ -6930,7 +6934,7 @@ name: foo route: { cluster: baz } typed_per_filter_config: test.default.filter: - "@type": type.googleapis.com/google.protobuf.Timestamp + "@type": type.googleapis.com/google.protobuf.Struct value: seconds: 123 )EOF"; diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 8f87363613e3..81044fddb558 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -74,6 +74,26 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalAddressVariab testFormatting("DOWNSTREAM_LOCAL_ADDRESS", "127.0.0.2:0"); } +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalPortVariable) { + NiceMock stream_info; + // Validate for IPv4 address + auto address = Network::Address::InstanceConstSharedPtr{ + new Network::Address::Ipv4Instance("127.1.2.3", 8443)}; + EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); + testFormatting(stream_info, "DOWNSTREAM_LOCAL_PORT", "8443"); + + // Validate for IPv6 address + address = + Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; + EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); + testFormatting(stream_info, "DOWNSTREAM_LOCAL_PORT", "9443"); + + // Validate for Pipe + address = Network::Address::InstanceConstSharedPtr{new 
Network::Address::PipeInstance("/foo")}; + EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); + testFormatting(stream_info, "DOWNSTREAM_LOCAL_PORT", ""); +} + TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalAddressWithoutPortVariable) { testFormatting("DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT", "127.0.0.2"); } @@ -716,6 +736,7 @@ TEST(HeaderParserTest, TestParseInternal) { {"%DOWNSTREAM_REMOTE_ADDRESS%", {"127.0.0.1:0"}, {}}, {"%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%", {"127.0.0.1"}, {}}, {"%DOWNSTREAM_LOCAL_ADDRESS%", {"127.0.0.2:0"}, {}}, + {"%DOWNSTREAM_LOCAL_PORT%", {"0"}, {}}, {"%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%", {"127.0.0.2"}, {}}, {"%UPSTREAM_METADATA([\"ns\", \"key\"])%", {"value"}, {}}, {"[%UPSTREAM_METADATA([\"ns\", \"key\"])%", {"[value"}, {}}, diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 0c0b5a0828ed..ad211cedc787 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -32,7 +32,6 @@ using testing::_; using testing::Eq; using testing::InSequence; using testing::Invoke; -using testing::Return; using testing::ReturnRef; namespace Envoy { diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 676c87d8dbae..6b55caf21288 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -40,8 +40,8 @@ class RouterRetryStateImplTest : public testing::Test { } void setup(Http::RequestHeaderMap& request_headers) { - state_ = RetryStateImpl::create(policy_, request_headers, cluster_, runtime_, random_, - dispatcher_, Upstream::ResourcePriority::Default); + state_ = RetryStateImpl::create(policy_, request_headers, cluster_, &virtual_cluster_, runtime_, + random_, dispatcher_, Upstream::ResourcePriority::Default); } void expectTimerCreateAndEnable() { @@ -87,10 +87,44 @@ class RouterRetryStateImplTest : public testing::Test { 
resource_manager_cleanup_tasks_.clear(); } + void verifyPolicyWithRemoteResponse(const std::string& retry_on, + const std::string& response_status, const bool is_grpc) { + Http::TestRequestHeaderMapImpl request_headers; + if (is_grpc) { + request_headers.setEnvoyRetryGrpcOn(retry_on); + } else { + request_headers.setEnvoyRetryOn(retry_on); + } + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + Http::TestResponseHeaderMapImpl response_headers; + if (is_grpc) { + response_headers.setStatus("200"); + response_headers.setGrpcStatus(response_status); + } else { + response_headers.setStatus(response_status); + } + + expectTimerCreateAndEnable(); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->invokeCallback(); + + EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, + state_->shouldRetryHeaders(response_headers, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); + } + void TearDown() override { cleanupOutstandingResources(); } NiceMock policy_; NiceMock cluster_; + TestVirtualCluster virtual_cluster_; NiceMock runtime_; NiceMock random_; Event::MockDispatcher dispatcher_; @@ -125,6 +159,11 @@ TEST_F(RouterRetryStateImplTest, PolicyRefusedStream) { EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_refused_stream_reset_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, Policy5xxResetOverflow) { 
@@ -145,21 +184,16 @@ TEST_F(RouterRetryStateImplTest, Policy5xxRemoteReset) { retry_timer_->invokeCallback(); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, Policy5xxRemote503) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "5xx"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "503"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("5xx" /* retry_on */, "503" /* response_status */, + false /* is_grpc */); } TEST_F(RouterRetryStateImplTest, Policy5xxRemote503Overloaded) { @@ -183,48 +217,18 @@ TEST_F(RouterRetryStateImplTest, PolicyResourceExhaustedRemoteRateLimited) { } TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote502) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "gateway-error"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "502"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("gateway-error" 
/* retry_on */, "502" /* response_status */, + false /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote503) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "gateway-error"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "503"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "503" /* response_status */, + false /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote504) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "gateway-error"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "504"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "504" /* response_status */, + false /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorResetOverflow) { @@ -245,81 +249,36 @@ TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemoteReset) { retry_timer_->invokeCallback(); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, 
cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, PolicyGrpcCancelled) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-grpc-on", "cancelled"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"grpc-status", "1"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("cancelled" /* retry_on */, "1" /* response_status */, + true /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGrpcDeadlineExceeded) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-grpc-on", "deadline-exceeded"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"grpc-status", "4"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("deadline-exceeded" /* retry_on */, "4" /* response_status */, + true /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGrpcResourceExhausted) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-grpc-on", "resource-exhausted"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"grpc-status", "8"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, 
state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("resource-exhausted" /* retry_on */, "8" /* response_status */, + true /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGrpcUnavilable) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-grpc-on", "unavailable"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"grpc-status", "14"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("unavailable" /* retry_on */, "14" /* response_status */, + true /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyGrpcInternal) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-grpc-on", "internal"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"grpc-status", "13"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); - - EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, - state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("internal" /* retry_on */, "13" /* response_status */, + true /* is_grpc */); } TEST_F(RouterRetryStateImplTest, Policy5xxRemote200RemoteReset) { @@ -332,6 +291,11 @@ TEST_F(RouterRetryStateImplTest, Policy5xxRemote200RemoteReset) { expectTimerCreateAndEnable(); 
EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryReset(remote_reset_, callback_)); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, RuntimeGuard) { @@ -363,15 +327,7 @@ TEST_F(RouterRetryStateImplTest, PolicyConnectFailureResetConnectFailure) { } TEST_F(RouterRetryStateImplTest, PolicyRetriable4xxRetry) { - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-4xx"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "409"}}; - expectTimerCreateAndEnable(); - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); - EXPECT_CALL(callback_ready_, ready()); - retry_timer_->invokeCallback(); + verifyPolicyWithRemoteResponse("retriable-4xx", "409", false /* is_grpc */); } TEST_F(RouterRetryStateImplTest, PolicyRetriable4xxNoRetry) { @@ -393,14 +349,7 @@ TEST_F(RouterRetryStateImplTest, PolicyRetriable4xxReset) { TEST_F(RouterRetryStateImplTest, RetriableStatusCodes) { policy_.retriable_status_codes_.push_back(409); - Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "retriable-status-codes"}}; - setup(request_headers); - EXPECT_TRUE(state_->enabled()); - - expectTimerCreateAndEnable(); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "409"}}; - EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); + verifyPolicyWithRemoteResponse("retriable-status-codes", "409", false /* is_grpc */); } TEST_F(RouterRetryStateImplTest, RetriableStatusCodesUpstreamReset) { @@ -691,6 +640,11 @@ 
TEST_F(RouterRetryStateImplTest, PolicyResetRemoteReset) { retry_timer_->invokeCallback(); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(remote_reset_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, PolicyLimitedByRequestHeaders) { @@ -766,6 +720,11 @@ TEST_F(RouterRetryStateImplTest, RouteConfigNoRetriesAllowed) { EXPECT_TRUE(state_->enabled()); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(connect_failure_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, RouteConfigNoHeaderConfig) { @@ -790,6 +749,7 @@ TEST_F(RouterRetryStateImplTest, NoAvailableRetries) { EXPECT_EQ(RetryStatus::NoOverflow, state_->shouldRetryReset(connect_failure_, callback_)); EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_overflow_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_overflow_.value()); } TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) { @@ -825,6 +785,10 @@ TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) { EXPECT_EQ(3UL, cluster_.stats().upstream_rq_retry_.value()); EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_success_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(3UL, virtual_cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_success_.value()); + EXPECT_EQ(1UL, 
virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); } TEST_F(RouterRetryStateImplTest, Backoff) { @@ -858,6 +822,8 @@ TEST_F(RouterRetryStateImplTest, Backoff) { EXPECT_EQ(3UL, cluster_.stats().upstream_rq_retry_.value()); EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_success_.value()); + EXPECT_EQ(3UL, virtual_cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_success_.value()); EXPECT_EQ(0UL, cluster_.circuit_breakers_stats_.rq_retry_open_.value()); } @@ -976,6 +942,11 @@ TEST_F(RouterRetryStateImplTest, ZeroMaxRetriesHeader) { EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(connect_failure_, callback_)); + + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } // Check that if there are 0 remaining retries available but we get @@ -991,6 +962,11 @@ TEST_F(RouterRetryStateImplTest, NoPreferredOverLimitExceeded) { Http::TestResponseHeaderMapImpl good_response_headers{{":status", "200"}}; EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(good_response_headers, callback_)); + + EXPECT_EQ(0UL, cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(0UL, virtual_cluster_.stats().upstream_rq_retry_limit_exceeded_.value()); + EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_.value()); + EXPECT_EQ(1UL, virtual_cluster_.stats().upstream_rq_retry_.value()); } TEST_F(RouterRetryStateImplTest, BudgetAvailableRetries) { diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5118898564497408.fuzz b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5118898564497408.fuzz new file mode 100644 index 000000000000..3f5be1aaf8e9 --- /dev/null +++ 
b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5118898564497408.fuzz @@ -0,0 +1,436 @@ +config { + virtual_hosts { + name: "*" + domains: "*" + routes { + match { + safe_regex { + google_re2 { + max_program_size { + value: 1868323924 + } + } + regex: "\001\000\000\000\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177?\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\
177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\1|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||t|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||.|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||.....................................................................................................................................................-3489372105301376950.n......................................................................................................................................................................|..................................................................................................................................................................................................................................................\016..............................................................................................................................................................................................................................................................................................................................................................................................................\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\
177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\
177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177|177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\
177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\
177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177w\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177 }\n }\n nnnnnnnnnnnnnnnnnnnnn hash_policy {\n header {\n header_name: \"n\"\n }\n }\n hash_policy {\n header {\n header_name: \"e\"\n }\n }\nnnnnnnn }\n }\n }\n}\n" + } + } + redirect { + strip_query: true + } + } + routes { + match { + case_sensitive { + value: true + } + safe_regex { + google_re2 { + max_program_size { + value: 1868323924 + } + } + regex: "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnennnnnnnnnnnnnnnnnnnnnnnnn" + } + } + redirect { + strip_query: true + } + } + } + response_headers_to_remove: "" + response_headers_to_remove: "" +} +headers { + headers { + } + headers { + key: "x-e`voy-iuternal" + } + headers { + } + headers { + key: "date" + value: "*" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + value: "=" + } + headers { + key: ":method" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "x-e`voy-iuternal" + } + headers { + value: "\177\177\177\177\177\177\177\025" + } + headers { + key: ":method" + } + headers { + } + headers { + } + headers { + key: "date" + } + headers { + key: "TE" + value: "?" 
+ } + headers { + value: "api.lyft.c?m" + } + headers { + key: ":method" + } + headers { + } + headers { + } + headers { + } + headers { + value: "*" + } + headers { + } + headers { + } + headers { + value: "*" + } + headers { + } + headers { + } + headers { + value: "*" + } + headers { + } + headers { + key: "x-forwarded-proto" + } + headers { + key: "date" + } + headers { + } + headers { + key: "date" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "host" + } + headers { + key: "date" + } + headers { + } + headers { + key: ":method" + } + headers { + } + headers { + } + headers { + } + headers { + value: "=" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "host" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "host" + } + headers { + } + headers { + key: "host" + } + headers { + } + headers { + } + headers { + key: ":method" + } + headers { + key: "TE" + } + headers { + } + headers { + } + headers { + value: "=" + } + headers { + } + headers { + } + headers { + } + headers { + key: ":path" + } + headers { + } + headers { + value: "\001\000\000\000\000\000\000?" 
+ } + headers { + } + headers { + } + headers { + key: ":path" + } + headers { + key: "x-forwarded-proto" + } + headers { + key: ":path" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "host" + } + headers { + key: "x-forwarded-proto" + } + headers { + value: "api.lyft.c?m" + } + headers { + } + headers { + value: "api.lyft.c?m" + } + headers { + } + headers { + value: "date" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "date" + value: "*" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: ":method" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "x-forwarded-proto" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "TE" + value: "?" + } + headers { + value: "*" + } + headers { + value: "\177\177\177\177\177\177\177\025" + } + headers { + } + headers { + key: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + headers { + } + headers { + } + headers { + } + headers { + value: "date" + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + value: "*" + } + headers { + } + headers { + key: "TE" + value: "?" 
+ } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + } + headers { + key: "date" + } + headers { + } + headers { + } + headers { + } +} \ No newline at end of file diff --git a/test/common/router/route_fuzz_test.cc b/test/common/router/route_fuzz_test.cc index fea78715c7b3..089424a2744d 100644 --- a/test/common/router/route_fuzz_test.cc +++ b/test/common/router/route_fuzz_test.cc @@ -4,7 +4,7 @@ #include "common/router/config_impl.h" -#include "test/common/router/route_fuzz.pb.h" +#include "test/common/router/route_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" #include "test/mocks/server/mocks.h" @@ -43,7 +43,7 @@ DEFINE_PROTO_FUZZER(const test::common::router::RouteTestCase& input) { static NiceMock stream_info; static NiceMock factory_context; try { - TestUtility::validate(input.config()); + TestUtility::validate(input); ConfigImpl config(cleanRouteConfig(input.config()), factory_context, ProtobufMessage::getNullValidationVisitor(), true); auto headers = Fuzz::fromHeaders(input.headers()); diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index b7708eb7440a..b2d35aeb3ad1 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -55,7 +55,6 @@ using testing::MockFunction; using testing::NiceMock; using testing::Property; using testing::Return; -using testing::ReturnPointee; using testing::ReturnRef; using testing::StartsWith; @@ -67,8 +66,8 @@ class RouterTestFilter : public Filter { using Filter::Filter; // Filter RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&, - const Upstream::ClusterInfo&, Runtime::Loader&, - Runtime::RandomGenerator&, Event::Dispatcher&, + const Upstream::ClusterInfo&, const VirtualCluster*, + Runtime::Loader&, Runtime::RandomGenerator&, Event::Dispatcher&, Upstream::ResourcePriority) override { EXPECT_EQ(nullptr, retry_state_); retry_state_ = new 
NiceMock(); @@ -226,6 +225,10 @@ class RouterTestBase : public testing::Test { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } void verifyAttemptCountInResponseBasic(bool set_include_attempt_count_in_response, @@ -263,6 +266,8 @@ class RouterTestBase : public testing::Test { })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } void sendRequest(bool end_stream = true) { @@ -393,6 +398,10 @@ TEST_F(RouterTest, UpdateServerNameFilterState) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, UpdateSubjectAltNamesFilterState) { @@ -417,6 +426,8 @@ TEST_F(RouterTest, UpdateSubjectAltNamesFilterState) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RouteNotFound) { @@ -429,6 +440,8 @@ TEST_F(RouterTest, RouteNotFound) { router_.decodeHeaders(headers, true); EXPECT_EQ(1UL, stats_store_.counter("test.no_route").value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_EQ(callbacks_.details_, "route_not_found"); } @@ -441,6 +454,8 @@ TEST_F(RouterTest, ClusterNotFound) { 
router_.decodeHeaders(headers, true); EXPECT_EQ(1UL, stats_store_.counter("test.no_cluster").value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_EQ(callbacks_.details_, "cluster_not_found"); } @@ -471,6 +486,9 @@ TEST_F(RouterTest, PoolFailureWithPriority) { HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + // Pool failure, so upstream request was not initiated. + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_EQ(callbacks_.details_, "upstream_reset_before_response_started{connection failure}"); } @@ -493,6 +511,8 @@ TEST_F(RouterTest, Http1Upstream) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } // We don't get x-envoy-expected-rq-timeout-ms or an indication to insert @@ -516,6 +536,8 @@ TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, Http2Upstream) { @@ -535,6 +557,8 @@ TEST_F(RouterTest, Http2Upstream) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, HashPolicy) { @@ -560,6 +584,8 @@ TEST_F(RouterTest, HashPolicy) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, 
HashPolicyNoHash) { @@ -585,6 +611,8 @@ TEST_F(RouterTest, HashPolicyNoHash) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, HashKeyNoHashPolicy) { @@ -820,6 +848,8 @@ TEST_F(RouterTest, CancelBeforeBoundToPool) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, NoHost) { @@ -839,6 +869,8 @@ TEST_F(RouterTest, NoHost) { .counter("upstream_rq_maintenance_mode") .value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_EQ(callbacks_.details_, "no_healthy_upstream"); } @@ -861,6 +893,8 @@ TEST_F(RouterTest, MaintenanceMode) { .counter("upstream_rq_maintenance_mode") .value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->load_report_stats_store_ .counter("upstream_rq_dropped") .value()); @@ -982,6 +1016,8 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Initial request has 1 attempt. 
EXPECT_EQ(1, atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); @@ -1006,6 +1042,8 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // The retry should cause the header to increase to 2. EXPECT_EQ(2, atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); @@ -1092,6 +1130,9 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponsePresentWithLocalReply) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + // Pool failure, so upstream request was never initiated. + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); EXPECT_EQ(callbacks_.details_, "upstream_reset_before_response_started{connection failure}"); } @@ -1116,6 +1157,8 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -1137,6 +1180,8 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Normal response. 
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -1183,6 +1228,8 @@ void RouterTestBase::testAppendCluster(absl::optional clu Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -1238,6 +1285,8 @@ void RouterTestBase::testAppendUpstreamHost( Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -1304,6 +1353,8 @@ void RouterTestBase::testDoNotForward( Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } @@ -1341,6 +1392,8 @@ TEST_F(RouterTest, AllDebugConfig) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } @@ -1364,6 +1417,8 @@ TEST_F(RouterTestSuppressEnvoyHeaders, EnvoyUpstreamServiceTime) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -1394,6 
+1449,8 @@ TEST_F(RouterTest, NoRetriesOverflow) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -1415,6 +1472,8 @@ TEST_F(RouterTest, NoRetriesOverflow) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // RetryOverflow kicks in. EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow)); @@ -1456,6 +1515,8 @@ TEST_F(RouterTest, ResetDuringEncodeHeaders) { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } @@ -1482,6 +1543,8 @@ TEST_F(RouterTest, UpstreamTimeout) { router_.decodeHeaders(headers, false); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -1498,6 +1561,8 @@ TEST_F(RouterTest, UpstreamTimeout) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_timeout") .value()); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_timeout_.value()); EXPECT_EQ(1UL, cm_.conn_pool_.host_->stats().rq_timeout_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } @@ -1524,6 +1589,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStat) { router_.decodeHeaders(headers, false); 
Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Global timeout budget used. EXPECT_CALL( @@ -1565,6 +1632,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStatFailure) { router_.decodeHeaders(headers, false); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Global timeout budget used. EXPECT_CALL( @@ -1603,6 +1672,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStatOnlyGlobal) { router_.decodeHeaders(headers, false); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Global timeout budget used. EXPECT_CALL( @@ -1644,6 +1715,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStatDuringRetries) { router_.decodeHeaders(headers, false); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Per-try budget used on the first request. EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_, @@ -1679,6 +1752,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStatDuringRetries) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Per-try budget exhausted on the second try. EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_, @@ -1735,6 +1810,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStatDuringGlobalTimeout) { router_.decodeHeaders(headers, false); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Per-try budget used on the first request. 
EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_, @@ -1770,6 +1847,8 @@ TEST_F(RouterTest, TimeoutBudgetHistogramStatDuringGlobalTimeout) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Global timeout was hit, fires 100. EXPECT_CALL( @@ -1822,6 +1901,8 @@ TEST_F(RouterTest, GrpcOkTrailersOnly) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "0"}}); @@ -1848,6 +1929,8 @@ TEST_F(RouterTest, GrpcAlreadyExistsTrailersOnly) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "6"}}); @@ -1874,6 +1957,8 @@ TEST_F(RouterTest, GrpcOutlierDetectionUnavailableStatusCode) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "14"}}); @@ -1901,6 +1986,8 @@ TEST_F(RouterTest, GrpcInternalTrailersOnly) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new 
Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "13"}}); @@ -1928,6 +2015,8 @@ TEST_F(RouterTest, GrpcDataEndStream) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -1958,6 +2047,8 @@ TEST_F(RouterTest, GrpcReset) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -1989,6 +2080,8 @@ TEST_F(RouterTest, GrpcOk) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(2); Http::ResponseHeaderMapPtr response_headers( @@ -2022,6 +2115,8 @@ TEST_F(RouterTest, GrpcInternal) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -2058,6 +2153,8 @@ TEST_F(RouterTest, UpstreamTimeoutWithAltResponse) { router_.decodeHeaders(headers, false); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -2106,6 +2203,8 @@ TEST_F(RouterTest, 
UpstreamPerTryTimeout) { Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -2158,7 +2257,11 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) { EXPECT_EQ(host_address_, host->address()); })); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); pool_callbacks->onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -2211,8 +2314,12 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) { per_try_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); EXPECT_CALL(*per_try_timeout_, enableTimer(_, _)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // The per try timeout timer should not be started yet. 
pool_callbacks->onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset)); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, @@ -2261,6 +2368,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL( cm_.conn_pool_.host_->outlier_detector_, @@ -2282,6 +2391,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // We should not have updated any stats yet because no requests have been // canceled @@ -2337,6 +2448,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutResetsOnBadHeaders) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL( cm_.conn_pool_.host_->outlier_detector_, @@ -2358,6 +2471,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutResetsOnBadHeaders) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // We should not have updated any stats yet because no requests have been // canceled @@ -2415,6 +2530,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; 
HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); @@ -2444,6 +2561,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); @@ -2468,6 +2587,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { per_try_timeout_->invokeCallback(); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(3U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); // Now write a 200 back. We expect the 2nd stream to be reset and stats to be @@ -2687,6 +2808,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutGlobalTimeout) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL( cm_.conn_pool_.host_->outlier_detector_, @@ -2709,6 +2832,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutGlobalTimeout) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); @@ -2755,6 +2880,8 @@ TEST_F(RouterTest, HedgingRetriesExhaustedBadResponse) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + 
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL( cm_.conn_pool_.host_->outlier_detector_, @@ -2781,6 +2908,8 @@ TEST_F(RouterTest, HedgingRetriesExhaustedBadResponse) { .Times(1); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); @@ -2843,6 +2972,8 @@ TEST_F(RouterTest, HedgingRetriesProceedAfterReset) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL( cm_.conn_pool_.host_->outlier_detector_, @@ -2865,6 +2996,8 @@ TEST_F(RouterTest, HedgingRetriesProceedAfterReset) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); @@ -2968,6 +3101,9 @@ TEST_F(RouterTest, HedgingRetryImmediatelyReset) { response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); + // Pool failure for the first try, so only 1 upstream request was made. + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RetryNoneHealthy) { @@ -3006,6 +3142,9 @@ TEST_F(RouterTest, RetryNoneHealthy) { setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream)); router_.retry_state_->callback_(); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + // Pool failure for the first try, so only 1 upstream request was made. 
+ EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RetryUpstreamReset) { @@ -3028,6 +3167,8 @@ TEST_F(RouterTest, RetryUpstreamReset) { EXPECT_CALL(callbacks_, addDecodedData(_, _)).Times(1); Buffer::OwnedImpl body("test body"); router_.decodeData(body, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); router_.retry_state_->expectResetRetry(); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, @@ -3048,6 +3189,8 @@ TEST_F(RouterTest, RetryUpstreamReset) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); // Normal response. @@ -3081,6 +3224,8 @@ TEST_F(RouterTest, NoRetryWithBodyLimit) { EXPECT_CALL(callbacks_, addDecodedData(_, _)).Times(0); Buffer::OwnedImpl body("t"); router_.decodeData(body, false); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -3110,6 +3255,8 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); router_.retry_state_->expectResetRetry(); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, @@ -3133,6 +3280,8 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { })); expectPerTryTimerCreate(); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Normal response. 
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3165,6 +3314,9 @@ TEST_F(RouterTest, RetryUpstreamConnectionFailure) { conn_pool_callbacks->onPoolFailure(Http::ConnectionPool::PoolFailureReason::ConnectionFailure, absl::string_view(), nullptr); + // Pool failure, so no upstream request was made. + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseDecoder* response_decoder = nullptr; // We expect this reset to kick off a new request. @@ -3179,6 +3331,8 @@ TEST_F(RouterTest, RetryUpstreamConnectionFailure) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3207,6 +3361,8 @@ TEST_F(RouterTest, DontResetStartedResponseOnUpstreamPerTryTimeout) { {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Since the response is already started we don't retry. 
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3223,6 +3379,8 @@ TEST_F(RouterTest, DontResetStartedResponseOnUpstreamPerTryTimeout) { EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_rq_per_try_timeout") .value()); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { @@ -3241,6 +3399,8 @@ TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Since the response is already started we don't retry. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3257,6 +3417,8 @@ TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { // For normal HTTP, once we have a 200 we consider this a success, even if a // later reset occurs. EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RetryUpstreamReset100ContinueResponseStarted) { @@ -3275,6 +3437,8 @@ TEST_F(RouterTest, RetryUpstreamReset100ContinueResponseStarted) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // The 100-continue will result in resetting retry_state_, so when the stream // is reset we won't even check shouldRetryReset() (or shouldRetryHeaders()). 
@@ -3290,6 +3454,8 @@ TEST_F(RouterTest, RetryUpstreamReset100ContinueResponseStarted) { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RetryUpstream5xx) { @@ -3308,6 +3474,8 @@ TEST_F(RouterTest, RetryUpstream5xx) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -3329,6 +3497,8 @@ TEST_F(RouterTest, RetryUpstream5xx) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3356,6 +3526,8 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelay) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -3394,6 +3566,8 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. 
router_.retry_state_->expectHeadersRetry(); @@ -3424,6 +3598,9 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { EXPECT_CALL(callbacks_, encodeData(_, true)); response_timeout_->invokeCallback(); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + // Timeout fired so no retry was done. + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } // Retry timeout during a retry delay leading to no upstream host, as well as an alt response code. @@ -3445,6 +3622,8 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltRespo {"x-envoy-upstream-rq-timeout-alt-response", "204"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -3473,6 +3652,9 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltRespo EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); response_timeout_->invokeCallback(); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + // no retry was done. + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, RetryUpstream5xxNotComplete) { @@ -3499,6 +3681,8 @@ TEST_F(RouterTest, RetryUpstream5xxNotComplete) { Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}}; router_.decodeTrailers(trailers); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. 
router_.retry_state_->expectHeadersRetry(); @@ -3524,6 +3708,8 @@ TEST_F(RouterTest, RetryUpstream5xxNotComplete) { EXPECT_CALL(encoder2, encodeData(_, false)); EXPECT_CALL(encoder2, encodeTrailers(_)); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3569,6 +3755,8 @@ TEST_F(RouterTest, RetryUpstreamGrpcCancelled) { {"grpc-timeout", "20S"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // gRPC with status "cancelled" (1) router_.retry_state_->expectHeadersRetry(); @@ -3590,6 +3778,8 @@ TEST_F(RouterTest, RetryUpstreamGrpcCancelled) { return nullptr; })); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -3632,6 +3822,8 @@ TEST_F(RouterTest, RetryRespsectsMaxHostSelectionCount) { Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}}; router_.decodeTrailers(trailers); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -3657,6 +3849,8 @@ TEST_F(RouterTest, RetryRespsectsMaxHostSelectionCount) { EXPECT_CALL(encoder2, encodeData(_, false)); EXPECT_CALL(encoder2, encodeTrailers(_)); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Now that we're triggered a retry, we should see the configured number of host selections. 
EXPECT_EQ(3, router_.hostSelectionRetryCount()); @@ -3703,6 +3897,8 @@ TEST_F(RouterTest, RetryRespectsRetryHostPredicate) { Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}}; router_.decodeTrailers(trailers); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -3728,6 +3924,8 @@ TEST_F(RouterTest, RetryRespectsRetryHostPredicate) { EXPECT_CALL(encoder2, encodeData(_, false)); EXPECT_CALL(encoder2, encodeTrailers(_)); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Now that we're triggered a retry, we should see the router reject hosts. EXPECT_TRUE(router_.shouldSelectAnotherHost(host)); @@ -3934,6 +4132,8 @@ TEST_F(RouterTest, Shadow) { EXPECT_FALSE(options.sampled_); })); router_.decodeTrailers(trailers); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -3962,6 +4162,8 @@ TEST_F(RouterTest, AltStatName) { {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResponseTime(_)); @@ -4008,6 +4210,8 @@ TEST_F(RouterTest, Redirect) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } @@ -4029,6 +4233,8 @@ TEST_F(RouterTest, RedirectFound) { Http::TestRequestHeaderMapImpl headers; 
HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } @@ -4048,6 +4254,8 @@ TEST_F(RouterTest, DirectResponse) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4070,6 +4278,8 @@ TEST_F(RouterTest, DirectResponseWithBody) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4092,6 +4302,8 @@ TEST_F(RouterTest, DirectResponseWithLocation) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4113,6 +4325,8 @@ TEST_F(RouterTest, DirectResponseWithoutLocation) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4139,6 +4353,8 @@ TEST_F(RouterTest, UpstreamSSLConnection) { Http::TestRequestHeaderMapImpl headers{}; HttpTestUtility::addDefaultHeaders(headers); 
router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -4178,6 +4394,8 @@ TEST_F(RouterTest, UpstreamTimingSingleRequest) { test_time_.sleep(std::chrono::milliseconds(32)); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "503"}}); @@ -4236,6 +4454,8 @@ TEST_F(RouterTest, UpstreamTimingRetry) { test_time_.sleep(std::chrono::milliseconds(32)); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); test_time_.sleep(std::chrono::milliseconds(43)); @@ -4321,6 +4541,8 @@ TEST_F(RouterTest, UpstreamTimingTimeout) { test_time_.sleep(std::chrono::milliseconds(13)); Buffer::OwnedImpl data; router_.decodeData(data, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); test_time_.sleep(std::chrono::milliseconds(33)); @@ -4872,6 +5094,8 @@ TEST_F(RouterTest, CanaryStatusTrue) { {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}, @@ -4906,6 +5130,8 @@ TEST_F(RouterTest, CanaryStatusFalse) { {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new 
Http::TestResponseHeaderMapImpl{{":status", "200"}, @@ -4955,6 +5181,8 @@ TEST_F(RouterTest, AutoHostRewriteEnabled) { })); EXPECT_CALL(callbacks_.route_->route_entry_, autoHostRewrite()).WillOnce(Return(true)); router_.decodeHeaders(incoming_headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, AutoHostRewriteDisabled) { @@ -4990,6 +5218,8 @@ TEST_F(RouterTest, AutoHostRewriteDisabled) { })); EXPECT_CALL(callbacks_.route_->route_entry_, autoHostRewrite()).WillOnce(Return(false)); router_.decodeHeaders(incoming_headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } TEST_F(RouterTest, UpstreamSocketOptionsReturnedEmpty) { @@ -5043,6 +5273,8 @@ TEST_F(RouterTest, ApplicationProtocols) { EXPECT_CALL(cancellable_, cancel()); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } class WatermarkTest : public RouterTest { @@ -5069,6 +5301,10 @@ class WatermarkTest : public RouterTest { })); HttpTestUtility::addDefaultHeaders(headers_); router_.decodeHeaders(headers_, header_only_request); + if (pool_ready) { + EXPECT_EQ( + 1U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + } } void sendResponse() { response_decoder_->decodeHeaders( @@ -5193,6 +5429,8 @@ TEST_F(WatermarkTest, RetryRequestNotComplete) { EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _)).Times(0); // This will result in retry_state_ being deleted. router_.decodeData(data, false); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // This should not trigger a retry as the retry state has been deleted. 
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, @@ -5232,6 +5470,8 @@ TEST_F(RouterTestChildSpan, BasicFlow) { .WillOnce(Return(child_span)); EXPECT_CALL(callbacks_, tracingConfig()); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); @@ -5273,6 +5513,8 @@ TEST_F(RouterTestChildSpan, ResetFlow) { .WillOnce(Return(child_span)); EXPECT_CALL(callbacks_, tracingConfig()); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Upstream responds back to envoy. Http::ResponseHeaderMapPtr response_headers( @@ -5318,6 +5560,8 @@ TEST_F(RouterTestChildSpan, CancelFlow) { .WillOnce(Return(child_span)); EXPECT_CALL(callbacks_, tracingConfig()); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Destroy the router, causing the upstream request to be cancelled. // Response code on span is 0 because the upstream never sent a response. @@ -5360,6 +5604,8 @@ TEST_F(RouterTestChildSpan, ResetRetryFlow) { .WillOnce(Return(child_span_1)); EXPECT_CALL(callbacks_, tracingConfig()); router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // The span should be annotated with the reset-related fields. EXPECT_CALL(*child_span_1, @@ -5397,6 +5643,8 @@ TEST_F(RouterTestChildSpan, ResetRetryFlow) { EXPECT_CALL(*child_span_2, setTag(Eq(Tracing::Tags::get().RetryCount), Eq("1"))); router_.retry_state_->callback_(); + EXPECT_EQ(2U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Upstream responds back with a normal response. Span should be annotated as usual. 
Http::ResponseHeaderMapPtr response_headers( diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index 582c07a2b1f8..d62043effadb 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -1,4 +1,5 @@ #include +#include #include #include "envoy/config/accesslog/v3/accesslog.pb.h" @@ -61,8 +62,8 @@ class TestFilter : public Filter { // Filter RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&, - const Upstream::ClusterInfo&, Runtime::Loader&, - Runtime::RandomGenerator&, Event::Dispatcher&, + const Upstream::ClusterInfo&, const VirtualCluster*, + Runtime::Loader&, Runtime::RandomGenerator&, Event::Dispatcher&, Upstream::ResourcePriority) override { EXPECT_EQ(nullptr, retry_state_); retry_state_ = new NiceMock(); @@ -92,9 +93,9 @@ class RouterUpstreamLogTest : public testing::Test { current_upstream_log->CopyFrom(upstream_log.value()); } - config_.reset(new FilterConfig("prefix.", context_, ShadowWriterPtr(new MockShadowWriter()), - router_proto)); - router_.reset(new TestFilter(*config_)); + config_ = std::make_shared("prefix.", context_, + ShadowWriterPtr(new MockShadowWriter()), router_proto); + router_ = std::make_shared(*config_); router_->setDecoderFilterCallbacks(callbacks_); EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(testing::AnyNumber()); diff --git a/test/common/router/shadow_writer_impl_test.cc b/test/common/router/shadow_writer_impl_test.cc index 64a3984369a8..d95ae08565b4 100644 --- a/test/common/router/shadow_writer_impl_test.cc +++ b/test/common/router/shadow_writer_impl_test.cc @@ -27,7 +27,6 @@ class ShadowWriterImplTest : public testing::Test { message->headers().setHost(host); EXPECT_CALL(cm_, get(Eq("foo"))); EXPECT_CALL(cm_, httpAsyncClientForCluster("foo")).WillOnce(ReturnRef(cm_.async_client_)); - Http::MockAsyncClientRequest request(&cm_.async_client_); auto options = 
Http::AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(5)); EXPECT_CALL(cm_.async_client_, send_(_, _, options)) .WillOnce(Invoke( @@ -36,13 +35,14 @@ class ShadowWriterImplTest : public testing::Test { EXPECT_EQ(message, inner_message); EXPECT_EQ(shadowed_host, message->headers().Host()->value().getStringView()); callback_ = &callbacks; - return &request; + return &request_; })); writer_.shadow("foo", std::move(message), options); } Upstream::MockClusterManager cm_; ShadowWriterImpl writer_{cm_}; + Http::MockAsyncClientRequest request_{&cm_.async_client_}; Http::AsyncClient::Callbacks* callback_{}; }; @@ -51,14 +51,14 @@ TEST_F(ShadowWriterImplTest, Success) { expectShadowWriter("cluster1", "cluster1-shadow"); Http::ResponseMessagePtr response(new Http::ResponseMessageImpl()); - callback_->onSuccess(std::move(response)); + callback_->onSuccess(request_, std::move(response)); } TEST_F(ShadowWriterImplTest, Failure) { InSequence s; expectShadowWriter("cluster1:8000", "cluster1-shadow:8000"); - callback_->onFailure(Http::AsyncClient::FailureReason::Reset); + callback_->onFailure(request_, Http::AsyncClient::FailureReason::Reset); } TEST_F(ShadowWriterImplTest, NoCluster) { diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index 3bdc96107997..c2cd9d5be4be 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -46,7 +46,7 @@ envoy_cc_test( "//source/common/config:runtime_utility_lib", "//source/common/runtime:runtime_lib", "//source/common/stats:isolated_store_lib", - "//source/common/stats:stats_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/mocks/filesystem:filesystem_mocks", "//test/mocks/init:init_mocks", @@ -75,12 +75,3 @@ envoy_cc_test( "//source/common/runtime:runtime_lib", ], ) - -envoy_cc_test( - name = "uuid_util_test", - srcs = ["uuid_util_test.cc"], - deps = [ - "//source/common/runtime:runtime_lib", - "//source/common/runtime:uuid_util_lib", - ], -) diff 
--git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index ce2f3be1ad82..85c14f3bbe21 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -9,8 +9,8 @@ #include "common/config/runtime_utility.h" #include "common/runtime/runtime_impl.h" -#include "common/stats/isolated_store_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" #include "test/mocks/init/mocks.h" @@ -111,7 +111,7 @@ class LoaderImplTest : public testing::Test { Event::MockDispatcher dispatcher_; NiceMock tls_; - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; MockRandomGenerator generator_; std::unique_ptr loader_; Api::ApiPtr api_; @@ -422,7 +422,7 @@ TEST_F(DiskLoaderImplTest, PercentHandling) { } } -void testNewOverrides(Loader& loader, Stats::Store& store) { +void testNewOverrides(Loader& loader, Stats::TestUtil::TestStore& store) { Stats::Gauge& admin_overrides_active = store.gauge("runtime.admin_overrides_active", Stats::Gauge::ImportMode::NeverImport); @@ -642,6 +642,7 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { // Double getting. EXPECT_EQ(1.1, loader_->snapshot().getDouble("file_with_words", 1.1)); EXPECT_EQ(23.2, loader_->snapshot().getDouble("file_with_double", 1.1)); + EXPECT_EQ(2.0, loader_->snapshot().getDouble("file3", 3.3)); // Boolean getting. 
const auto snapshot = reinterpret_cast(&loader_->snapshot()); @@ -795,7 +796,7 @@ class DiskLayerTest : public testing::Test { protected: DiskLayerTest() : api_(Api::createApiForTest()) {} - static void SetUpTestSuite() { + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) TestEnvironment::exec( {TestEnvironment::runfilesPath("test/common/runtime/filesystem_setup.sh")}); } diff --git a/test/common/runtime/runtime_protos_test.cc b/test/common/runtime/runtime_protos_test.cc index 0cb3cecdc52c..e8a61f27c9c3 100644 --- a/test/common/runtime/runtime_protos_test.cc +++ b/test/common/runtime/runtime_protos_test.cc @@ -19,12 +19,31 @@ namespace Envoy { namespace Runtime { namespace { -class FeatureFlagTest : public testing::Test { +class RuntimeProtosTest : public testing::Test { protected: NiceMock runtime_; }; -TEST_F(FeatureFlagTest, FeatureFlagBasicTest) { +TEST_F(RuntimeProtosTest, DoubleBasicTest) { + envoy::config::core::v3::RuntimeDouble double_proto; + std::string yaml(R"EOF( +runtime_key: "foo.bar" +default_value: 4.2 +)EOF"); + TestUtility::loadFromYamlAndValidate(yaml, double_proto); + Double test_double(double_proto, runtime_); + + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 4.2)); + EXPECT_EQ(4.2, test_double.value()); + + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 4.2)).WillOnce(Return(1.337)); + EXPECT_EQ(1.337, test_double.value()); + + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.bar", 4.2)).WillOnce(Return(1)); + EXPECT_EQ(1.0, test_double.value()); +} + +TEST_F(RuntimeProtosTest, FeatureFlagBasicTest) { envoy::config::core::v3::RuntimeFeatureFlag feature_flag_proto; std::string yaml(R"EOF( runtime_key: "foo.bar" @@ -54,7 +73,7 @@ default_value: false EXPECT_EQ(true, test_feature2.enabled()); } -TEST_F(FeatureFlagTest, FeatureFlagEmptyProtoTest) { +TEST_F(RuntimeProtosTest, FeatureFlagEmptyProtoTest) { envoy::config::core::v3::RuntimeFeatureFlag empty_proto; FeatureFlag test(empty_proto, runtime_); @@ -62,7 
+81,7 @@ TEST_F(FeatureFlagTest, FeatureFlagEmptyProtoTest) { EXPECT_EQ(true, test.enabled()); } -TEST_F(FeatureFlagTest, FractionalPercentBasicTest) { +TEST_F(RuntimeProtosTest, FractionalPercentBasicTest) { envoy::config::core::v3::RuntimeFractionalPercent runtime_fractional_percent_proto; std::string yaml(R"EOF( runtime_key: "foo.bar" diff --git a/test/common/runtime/uuid_util_test.cc b/test/common/runtime/uuid_util_test.cc deleted file mode 100644 index 8596783038a5..000000000000 --- a/test/common/runtime/uuid_util_test.cc +++ /dev/null @@ -1,94 +0,0 @@ -#include - -#include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" - -#include "gtest/gtest.h" - -namespace Envoy { -TEST(UUIDUtilsTest, mod) { - uint64_t result; - EXPECT_TRUE(UuidUtils::uuidModBy("00000000-0000-0000-0000-000000000000", result, 100)); - EXPECT_EQ(0, result); - - EXPECT_TRUE(UuidUtils::uuidModBy("00000001-0000-0000-0000-000000000000", result, 100)); - EXPECT_EQ(1, result); - - EXPECT_TRUE(UuidUtils::uuidModBy("0000000f-0000-0000-0000-00000000000a", result, 100)); - EXPECT_EQ(15, result); - - EXPECT_FALSE(UuidUtils::uuidModBy("", result, 100)); - - EXPECT_TRUE(UuidUtils::uuidModBy("000000ff-0000-0000-0000-000000000000", result, 100)); - EXPECT_EQ(55, result); - - EXPECT_TRUE(UuidUtils::uuidModBy("000000ff-0000-0000-0000-000000000000", result, 10000)); - EXPECT_EQ(255, result); - - EXPECT_TRUE(UuidUtils::uuidModBy("a0090100-0012-0110-00ff-0c00400600ff", result, 137)); - EXPECT_EQ(8, result); - - EXPECT_TRUE(UuidUtils::uuidModBy("ffffffff-0012-0110-00ff-0c00400600ff", result, 100)); - EXPECT_EQ(95, result); - - EXPECT_TRUE(UuidUtils::uuidModBy("ffffffff-0012-0110-00ff-0c00400600ff", result, 10000)); - EXPECT_EQ(7295, result); -} - -TEST(UUIDUtilsTest, checkDistribution) { - Runtime::RandomGeneratorImpl random; - - const int mod = 100; - const int required_percentage = 11; - int total_samples = 0; - int interesting_samples = 0; - - for (int i = 0; i < 500000; ++i) { - 
std::string uuid = random.uuid(); - - const char c = uuid[19]; - ASSERT_TRUE(uuid[14] == '4'); // UUID version 4 (random) - ASSERT_TRUE(c == '8' || c == '9' || c == 'a' || c == 'b'); // UUID variant 1 (RFC4122) - - uint64_t value; - ASSERT_TRUE(UuidUtils::uuidModBy(uuid, value, mod)); - - if (value < required_percentage) { - interesting_samples++; - } - total_samples++; - } - - EXPECT_NEAR(required_percentage / 100.0, interesting_samples * 1.0 / total_samples, 0.002); -} - -TEST(UUIDUtilsTest, DISABLED_benchmark) { - Runtime::RandomGeneratorImpl random; - - for (int i = 0; i < 100000000; ++i) { - random.uuid(); - } -} - -TEST(UUIDUtilsTest, setAndCheckTraceable) { - Runtime::RandomGeneratorImpl random; - - std::string uuid = random.uuid(); - EXPECT_EQ(UuidTraceStatus::NoTrace, UuidUtils::isTraceableUuid(uuid)); - - EXPECT_TRUE(UuidUtils::setTraceableUuid(uuid, UuidTraceStatus::Sampled)); - EXPECT_EQ(UuidTraceStatus::Sampled, UuidUtils::isTraceableUuid(uuid)); - - EXPECT_TRUE(UuidUtils::setTraceableUuid(uuid, UuidTraceStatus::Client)); - EXPECT_EQ(UuidTraceStatus::Client, UuidUtils::isTraceableUuid(uuid)); - - EXPECT_TRUE(UuidUtils::setTraceableUuid(uuid, UuidTraceStatus::Forced)); - EXPECT_EQ(UuidTraceStatus::Forced, UuidUtils::isTraceableUuid(uuid)); - - EXPECT_TRUE(UuidUtils::setTraceableUuid(uuid, UuidTraceStatus::NoTrace)); - EXPECT_EQ(UuidTraceStatus::NoTrace, UuidUtils::isTraceableUuid(uuid)); - - std::string invalid_uuid = ""; - EXPECT_FALSE(UuidUtils::setTraceableUuid(invalid_uuid, UuidTraceStatus::Forced)); -} -} // namespace Envoy diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index 4a94ca7ab1ee..b67d0a06a050 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -31,7 +31,7 @@ namespace { class SdsApiTest : public testing::Test { protected: - SdsApiTest() : api_(Api::createApiForTest()) { + SdsApiTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()) { 
EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) { init_target_handle_ = target.createHandle("test"); })); @@ -46,6 +46,7 @@ class SdsApiTest : public testing::Test { NiceMock init_watcher_; Event::GlobalTimeSystem time_system_; Init::TargetHandlePtr init_target_handle_; + Event::DispatcherPtr dispatcher_; }; // Validate that SdsApi object is created and initialized successfully. @@ -55,8 +56,9 @@ TEST_F(SdsApiTest, BasicTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - TlsCertificateSdsApi sds_api(config_source, "abc.com", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + TlsCertificateSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); initialize(); } @@ -65,8 +67,9 @@ TEST_F(SdsApiTest, BasicTest) { TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - TlsCertificateSdsApi sds_api(config_source, "abc.com", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + TlsCertificateSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); initialize(); NiceMock secret_callback; auto handle = @@ -107,9 +110,11 @@ class PartialMockSds : public SdsApi { public: PartialMockSds(NiceMock& server, NiceMock& init_manager, envoy::config::core::v3::ConfigSource& config_source, - Config::SubscriptionFactory& subscription_factory, TimeSource& time_source) - : SdsApi(config_source, "abc.com", subscription_factory, time_source, validation_visitor_, - server.stats(), init_manager, []() {}) {} + Config::SubscriptionFactory& subscription_factory, TimeSource& time_source, + Event::Dispatcher& dispatcher, Api::Api& api) + : SdsApi( 
+ config_source, "abc.com", subscription_factory, time_source, validation_visitor_, + server.stats(), init_manager, []() {}, dispatcher, api) {} MOCK_METHOD(void, onConfigUpdate, (const Protobuf::RepeatedPtrField&, const std::string&)); @@ -121,6 +126,7 @@ class PartialMockSds : public SdsApi { } void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {} void validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {} + std::vector getDataSourceFilenames() override { return {}; } NiceMock validation_visitor_; }; @@ -142,7 +148,8 @@ TEST_F(SdsApiTest, Delta) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; Event::GlobalTimeSystem time_system; - PartialMockSds sds(server, init_manager_, config_source, subscription_factory_, time_system); + PartialMockSds sds(server, init_manager_, config_source, subscription_factory_, time_system, + *dispatcher_, *api_); initialize(); EXPECT_CALL(sds, onConfigUpdate(RepeatedProtoEq(for_matching), "version1")); subscription_factory_.callbacks_->onConfigUpdate(resources, {}, "ignored"); @@ -160,8 +167,9 @@ TEST_F(SdsApiTest, Delta) { TEST_F(SdsApiTest, DeltaUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - TlsCertificateSdsApi sds_api(config_source, "abc.com", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + TlsCertificateSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); NiceMock secret_callback; auto handle = @@ -204,9 +212,9 @@ TEST_F(SdsApiTest, DeltaUpdateSuccess) { TEST_F(SdsApiTest, DynamicCertificateValidationContextUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - CertificateValidationContextSdsApi sds_api(config_source, "abc.com", subscription_factory_, - time_system_, validation_visitor_, 
server.stats(), - init_manager_, []() {}); + CertificateValidationContextSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); NiceMock secret_callback; auto handle = @@ -258,9 +266,9 @@ class MockCvcValidationCallback : public CvcValidationCallback { TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - CertificateValidationContextSdsApi sds_api(config_source, "abc.com", subscription_factory_, - time_system_, validation_visitor_, server.stats(), - init_manager_, []() {}); + CertificateValidationContextSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); NiceMock secret_callback; auto handle = @@ -346,8 +354,9 @@ class MockGenericSecretValidationCallback : public GenericSecretValidationCallba TEST_F(SdsApiTest, GenericSecretSdsApiTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - GenericSecretSdsApi sds_api(config_source, "encryption_key", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + GenericSecretSdsApi sds_api( + config_source, "encryption_key", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); NiceMock secret_callback; auto handle = @@ -390,8 +399,9 @@ name: "encryption_key" TEST_F(SdsApiTest, EmptyResource) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - TlsCertificateSdsApi sds_api(config_source, "abc.com", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + TlsCertificateSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, 
*api_); Protobuf::RepeatedPtrField secret_resources; @@ -405,8 +415,9 @@ TEST_F(SdsApiTest, EmptyResource) { TEST_F(SdsApiTest, SecretUpdateWrongSize) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - TlsCertificateSdsApi sds_api(config_source, "abc.com", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + TlsCertificateSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); std::string yaml = R"EOF( @@ -434,8 +445,9 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; - TlsCertificateSdsApi sds_api(config_source, "abc.com", subscription_factory_, time_system_, - validation_visitor_, server.stats(), init_manager_, []() {}); + TlsCertificateSdsApi sds_api( + config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, + server.stats(), init_manager_, []() {}, *dispatcher_, *api_); std::string yaml = R"EOF( diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index b25ca10cad61..66d6c060f54f 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -32,7 +32,8 @@ namespace { class SecretManagerImplTest : public testing::Test, public Logger::Loggable { protected: - SecretManagerImplTest() : api_(Api::createApiForTest()) {} + SecretManagerImplTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()) {} void checkConfigDump(const std::string& expected_dump_yaml) { auto message_ptr = config_tracker_.config_tracker_callbacks_["secrets"](); @@ -48,6 +49,7 @@ class SecretManagerImplTest : public testing::Test, public Logger::Loggable config_tracker_; Event::SimulatedTimeSystem time_system_; + Event::DispatcherPtr 
dispatcher_; }; // Validate that secret manager adds static TLS certificate secret successfully. @@ -338,7 +340,6 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; - NiceMock dispatcher; NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; @@ -350,8 +351,9 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { })); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); + EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); auto secret_provider = secret_manager->findOrCreateTlsCertificateProvider(config_source, "abc.com", secret_context); @@ -388,7 +390,6 @@ TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { envoy::config::core::v3::ConfigSource config_source; NiceMock secret_context; - NiceMock dispatcher; NiceMock validation_visitor; Stats::IsolatedStoreImpl stats; NiceMock init_manager; @@ -396,11 +397,12 @@ TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { Init::TargetHandlePtr init_target_handle; NiceMock init_watcher; - EXPECT_CALL(secret_context, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, messageValidationVisitor()).WillOnce(ReturnRef(validation_visitor)); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); + EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); EXPECT_CALL(init_manager, 
add(_)) .WillOnce(Invoke([&init_target_handle](const Init::Target& target) { init_target_handle = target.createHandle("test"); @@ -506,7 +508,7 @@ name: "abc.com" name: "abc.com.validation" validation_context: trusted_ca: - inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" + inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(validation_yaml), typed_secret); secret_resources.Clear(); @@ -535,7 +537,7 @@ name: "abc.com.validation" inline_string: "[redacted]" password: inline_string: "[redacted]" -- name: "abc.com.validation" +- name: "abc.com.validation" version_info: "validation-context-v1" last_updated: seconds: 1234567899 @@ -544,7 +546,7 @@ name: "abc.com.validation" name: "abc.com.validation" validation_context: trusted_ca: - inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" + inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" )EOF"; checkConfigDump(updated_config_dump); @@ -586,7 +588,7 @@ name: "abc.com.stek" inline_string: "[redacted]" password: inline_string: "[redacted]" -- name: "abc.com.validation" +- name: "abc.com.validation" version_info: "validation-context-v1" last_updated: seconds: 1234567899 @@ -595,8 +597,8 @@ name: "abc.com.stek" name: "abc.com.validation" validation_context: trusted_ca: - inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" -- name: "abc.com.stek" + inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" +- name: "abc.com.stek" version_info: "stek-context-v1" last_updated: seconds: 1234567899 @@ -650,7 +652,7 @@ name: "signing_key" inline_string: "[redacted]" password: inline_string: "[redacted]" -- name: "abc.com.validation" +- name: "abc.com.validation" version_info: "validation-context-v1" last_updated: seconds: 1234567899 @@ -659,8 +661,8 @@ name: "signing_key" name: "abc.com.validation" validation_context: trusted_ca: - inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" -- name: "abc.com.stek" + inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" +- name: "abc.com.stek" version_info: 
"stek-context-v1" last_updated: seconds: 1234567899 @@ -672,7 +674,7 @@ name: "signing_key" - filename: "[redacted]" - inline_string: "[redacted]" - inline_bytes: "W3JlZGFjdGVkXQ==" -- name: "signing_key" +- name: "signing_key" version_info: "signing-key-v1" last_updated: seconds: 1234567900 @@ -878,7 +880,7 @@ name: "abc.com.nopassword" inline_string: "DUMMY_INLINE_BYTES_FOR_CERT_CHAIN" private_key: inline_string: "[redacted]" -- name: "abc.com" +- name: "abc.com" secret: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret name: "abc.com" diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 8b11812295a0..6869f2f732a8 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -70,6 +70,7 @@ envoy_cc_test( name = "stat_merger_test", srcs = ["stat_merger_test.cc"], deps = [ + ":stat_test_utility_lib", "//source/common/stats:isolated_store_lib", "//source/common/stats:stat_merger_lib", "//source/common/stats:symbol_table_creator_lib", diff --git a/test/common/stats/isolated_store_impl_test.cc b/test/common/stats/isolated_store_impl_test.cc index b85feeb90207..ffa8e94f6915 100644 --- a/test/common/stats/isolated_store_impl_test.cc +++ b/test/common/stats/isolated_store_impl_test.cc @@ -34,8 +34,8 @@ class StatsIsolatedStoreImplTest : public testing::Test { TEST_F(StatsIsolatedStoreImplTest, All) { ScopePtr scope1 = store_->createScope("scope1."); - Counter& c1 = store_->counter("c1"); - Counter& c2 = scope1->counter("c2"); + Counter& c1 = store_->counterFromString("c1"); + Counter& c2 = scope1->counterFromString("c2"); EXPECT_EQ("c1", c1.name()); EXPECT_EQ("scope1.c2", c2.name()); EXPECT_EQ("c1", c1.tagExtractedName()); @@ -57,8 +57,8 @@ TEST_F(StatsIsolatedStoreImplTest, All) { c1.add(100); EXPECT_EQ(200, found_counter->get().value()); - Gauge& g1 = store_->gauge("g1", Gauge::ImportMode::Accumulate); - Gauge& g2 = scope1->gauge("g2", Gauge::ImportMode::Accumulate); + Gauge& g1 = store_->gaugeFromString("g1", 
Gauge::ImportMode::Accumulate); + Gauge& g2 = scope1->gaugeFromString("g2", Gauge::ImportMode::Accumulate); EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); EXPECT_EQ("g1", g1.tagExtractedName()); @@ -83,10 +83,10 @@ TEST_F(StatsIsolatedStoreImplTest, All) { g1.set(0); EXPECT_EQ(0, found_gauge->get().value()); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); EXPECT_TRUE(h1.used()); // hardcoded in impl to be true always. EXPECT_TRUE(h1.use_count() == 1); - Histogram& h2 = scope1->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = scope1->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); scope1->deliverHistogramToSinks(h2, 0); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("scope1.h2", h2.name()); @@ -111,11 +111,11 @@ TEST_F(StatsIsolatedStoreImplTest, All) { EXPECT_EQ(&h1, &found_histogram->get()); ScopePtr scope2 = scope1->createScope("foo."); - EXPECT_EQ("scope1.foo.bar", scope2->counter("bar").name()); + EXPECT_EQ("scope1.foo.bar", scope2->counterFromString("bar").name()); // Validate that we sanitize away bad characters in the stats prefix. 
ScopePtr scope3 = scope1->createScope(std::string("foo:\0:.", 7)); - EXPECT_EQ("scope1.foo___.bar", scope3->counter("bar").name()); + EXPECT_EQ("scope1.foo___.bar", scope3->counterFromString("bar").name()); EXPECT_EQ(4UL, store_->counters().size()); EXPECT_EQ(2UL, store_->gauges().size()); @@ -129,7 +129,7 @@ TEST_F(StatsIsolatedStoreImplTest, All) { TEST_F(StatsIsolatedStoreImplTest, PrefixIsStatName) { ScopePtr scope1 = store_->createScope("scope1"); ScopePtr scope2 = scope1->createScope("scope2"); - Counter& c1 = scope2->counter("c1"); + Counter& c1 = scope2->counterFromString("c1"); EXPECT_EQ("scope1.scope2.c1", c1.name()); } @@ -172,7 +172,7 @@ TEST_F(StatsIsolatedStoreImplTest, AllWithSymbolTable) { // Validate that we sanitize away bad characters in the stats prefix. ScopePtr scope3 = scope1->createScope(std::string("foo:\0:.", 7)); - EXPECT_EQ("scope1.foo___.bar", scope3->counter("bar").name()); + EXPECT_EQ("scope1.foo___.bar", scope3->counterFromString("bar").name()); EXPECT_EQ(4UL, store_->counters().size()); EXPECT_EQ(2UL, store_->gauges().size()); @@ -190,7 +190,7 @@ TEST_F(StatsIsolatedStoreImplTest, LongStatName) { const std::string long_string(128, 'A'); ScopePtr scope = store_->createScope("scope."); - Counter& counter = scope->counter(long_string); + Counter& counter = scope->counterFromString(long_string); EXPECT_EQ(absl::StrCat("scope.", long_string), counter.name()); } diff --git a/test/common/stats/stat_merger_corpus/example5 b/test/common/stats/stat_merger_corpus/example5 new file mode 100644 index 000000000000..bfb828b5c122 --- /dev/null +++ b/test/common/stats/stat_merger_corpus/example5 @@ -0,0 +1 @@ +ไ-aÿÿÿsdsdfoj pa098ausd0f8nuis-a1foj pa098ausd„-aÿÿÿsdsdfoj pa098ausd0f8nuis-a1foj pa098ausd0f214748364ʶ8ó €´nuis-a9.2233720368547󠀡75709j p \ No newline at end of file diff --git a/test/common/stats/stat_merger_fuzz_test.cc b/test/common/stats/stat_merger_fuzz_test.cc index 8e4541778213..44077aa82e24 100644 --- 
a/test/common/stats/stat_merger_fuzz_test.cc +++ b/test/common/stats/stat_merger_fuzz_test.cc @@ -25,9 +25,13 @@ void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { std::string unit_test_encoding; for (uint32_t index = 0; index < data.size();) { - // Select component lengths between 0 and 7 bytes inclusive, and ensure it - // doesn't overrun our buffer. It's OK to get very small or empty segments. - uint32_t num_bytes = data[index] & 0x7; + // Select component lengths between 1 and 8 bytes inclusive, and ensure it + // doesn't overrun our buffer. + // + // TODO(#10008): We should remove the "1 +" below, so we can get empty + // segments, which trigger some inconsistent handling as described in that + // bug. + uint32_t num_bytes = 1 + data[index] & 0x7; num_bytes = std::min(static_cast(data.size() - 1), num_bytes); // restrict number up to the size of data diff --git a/test/common/stats/stat_merger_test.cc b/test/common/stats/stat_merger_test.cc index 3cb0cac86cae..bb47651bcdc9 100644 --- a/test/common/stats/stat_merger_test.cc +++ b/test/common/stats/stat_merger_test.cc @@ -17,13 +17,13 @@ namespace { class StatMergerTest : public testing::Test { public: StatMergerTest() - : stat_merger_(store_), whywassixafraidofseven_(store_.gauge("whywassixafraidofseven", - Gauge::ImportMode::Accumulate)) { + : stat_merger_(store_), whywassixafraidofseven_(store_.gaugeFromString( + "whywassixafraidofseven", Gauge::ImportMode::Accumulate)) { whywassixafraidofseven_.set(678); } void mergeTest(const std::string& name, Gauge::ImportMode initial, Gauge::ImportMode merge) { - Gauge& g1 = store_.gauge(name, initial); + Gauge& g1 = store_.gaugeFromString(name, initial); EXPECT_EQ(initial, g1.importMode()) << name; g1.mergeImportMode(merge); EXPECT_EQ(merge, g1.importMode()) << name; @@ -38,7 +38,7 @@ class StatMergerTest : public testing::Test { StatNamePool symbolic_pool(symbol_table); StatNameDynamicPool dynamic_pool(symbol_table); - for (absl::string_view 
segment : absl::StrSplit(input_name, ".")) { + for (absl::string_view segment : absl::StrSplit(input_name, '.')) { if (absl::StartsWith(segment, "D:")) { std::string hacked = absl::StrReplaceAll(segment.substr(2), {{",", "."}}); components.push_back(dynamic_pool.add(hacked)); @@ -66,35 +66,35 @@ class StatMergerTest : public testing::Test { TEST_F(StatMergerTest, CounterMerge) { // Child's value of the counter might already be non-zero by the first merge. - store_.counter("draculaer").inc(); - EXPECT_EQ(1, store_.counter("draculaer").latch()); + store_.counterFromString("draculaer").inc(); + EXPECT_EQ(1, store_.counterFromString("draculaer").latch()); Protobuf::Map counter_deltas; counter_deltas["draculaer"] = 1; stat_merger_.mergeStats(counter_deltas, empty_gauges_); // Initial combined value: 1+1. - EXPECT_EQ(2, store_.counter("draculaer").value()); - EXPECT_EQ(1, store_.counter("draculaer").latch()); + EXPECT_EQ(2, store_.counterFromString("draculaer").value()); + EXPECT_EQ(1, store_.counterFromString("draculaer").latch()); // The parent's counter increases by 1. counter_deltas["draculaer"] = 1; stat_merger_.mergeStats(counter_deltas, empty_gauges_); - EXPECT_EQ(3, store_.counter("draculaer").value()); - EXPECT_EQ(1, store_.counter("draculaer").latch()); + EXPECT_EQ(3, store_.counterFromString("draculaer").value()); + EXPECT_EQ(1, store_.counterFromString("draculaer").latch()); // Our own counter increases by 4, while the parent's stays constant. Total increase of 4. - store_.counter("draculaer").add(4); + store_.counterFromString("draculaer").add(4); counter_deltas["draculaer"] = 0; stat_merger_.mergeStats(counter_deltas, empty_gauges_); - EXPECT_EQ(7, store_.counter("draculaer").value()); - EXPECT_EQ(4, store_.counter("draculaer").latch()); + EXPECT_EQ(7, store_.counterFromString("draculaer").value()); + EXPECT_EQ(4, store_.counterFromString("draculaer").latch()); // Our counter and the parent's counter both increase by 2, total increase of 4. 
- store_.counter("draculaer").add(2); + store_.counterFromString("draculaer").add(2); counter_deltas["draculaer"] = 2; stat_merger_.mergeStats(counter_deltas, empty_gauges_); - EXPECT_EQ(11, store_.counter("draculaer").value()); - EXPECT_EQ(4, store_.counter("draculaer").latch()); + EXPECT_EQ(11, store_.counterFromString("draculaer").value()); + EXPECT_EQ(4, store_.counterFromString("draculaer").latch()); } TEST_F(StatMergerTest, BasicDefaultAccumulationImport) { @@ -142,7 +142,7 @@ TEST_F(StatMergerTest, MultipleImportsWithAccumulationLogic) { // the child has that gauge undefined. TEST_F(StatMergerTest, ExclusionsNotImported) { Gauge& some_sort_of_version = - store_.gauge("some.sort.of.version", Gauge::ImportMode::NeverImport); + store_.gaugeFromString("some.sort.of.version", Gauge::ImportMode::NeverImport); some_sort_of_version.set(12345); Protobuf::Map gauges; @@ -153,7 +153,8 @@ TEST_F(StatMergerTest, ExclusionsNotImported) { stat_merger_.mergeStats(empty_counter_deltas_, gauges); EXPECT_EQ(12345, some_sort_of_version.value()); EXPECT_FALSE( - store_.gauge("child.doesnt.have.this.version", Gauge::ImportMode::NeverImport).used()); + store_.gaugeFromString("child.doesnt.have.this.version", Gauge::ImportMode::NeverImport) + .used()); // Check the "undefined remains undefined" behavior for a bunch of other names. 
gauges["runtime.admin_overrides_active"] = 111; @@ -177,7 +178,7 @@ TEST_F(StatMergerTest, ExclusionsNotImported) { stat_merger_.mergeStats(empty_counter_deltas_, gauges); #define EXPECT_GAUGE_NOT_USED(name) \ - EXPECT_FALSE(store_.gauge(name, Gauge::ImportMode::NeverImport).used()) + EXPECT_FALSE(store_.gaugeFromString(name, Gauge::ImportMode::NeverImport).used()) EXPECT_GAUGE_NOT_USED("child.doesnt.have.this.version"); EXPECT_GAUGE_NOT_USED("runtime.admin_overrides_active"); @@ -236,7 +237,7 @@ class StatMergerDynamicTest : public testing::Test { StatNamePool symbolic_pool(*symbol_table_); StatNameDynamicPool dynamic_pool(*symbol_table_); - for (absl::string_view segment : absl::StrSplit(input_descriptor, ".")) { + for (absl::string_view segment : absl::StrSplit(input_descriptor, '.')) { if (absl::StartsWith(segment, "D:")) { std::string hacked = absl::StrReplaceAll(segment.substr(2), {{",", "."}}); components.push_back(dynamic_pool.add(hacked)); @@ -342,8 +343,8 @@ class StatMergerThreadLocalTest : public testing::Test { }; TEST_F(StatMergerThreadLocalTest, FilterOutUninitializedGauges) { - Gauge& g1 = store_.gauge("newgauge1", Gauge::ImportMode::Uninitialized); - Gauge& g2 = store_.gauge("newgauge2", Gauge::ImportMode::Accumulate); + Gauge& g1 = store_.gaugeFromString("newgauge1", Gauge::ImportMode::Uninitialized); + Gauge& g2 = store_.gaugeFromString("newgauge2", Gauge::ImportMode::Accumulate); std::vector gauges = store_.gauges(); ASSERT_EQ(1, gauges.size()); EXPECT_EQ(&g2, gauges[0].get()); @@ -369,11 +370,11 @@ TEST_F(StatMergerThreadLocalTest, NewStatFromParent) { gauges["newgauge1"] = 1; gauges["newgauge2"] = 2; stat_merger.mergeStats(counter_deltas, gauges); - EXPECT_EQ(0, store_.counter("newcounter0").value()); - EXPECT_EQ(0, store_.counter("newcounter0").latch()); - EXPECT_EQ(1, store_.counter("newcounter1").value()); - EXPECT_EQ(1, store_.counter("newcounter1").latch()); - EXPECT_EQ(1, store_.gauge("newgauge1", Gauge::ImportMode::Accumulate).value()); 
+ EXPECT_EQ(0, store_.counterFromString("newcounter0").value()); + EXPECT_EQ(0, store_.counterFromString("newcounter0").latch()); + EXPECT_EQ(1, store_.counterFromString("newcounter1").value()); + EXPECT_EQ(1, store_.counterFromString("newcounter1").latch()); + EXPECT_EQ(1, store_.gaugeFromString("newgauge1", Gauge::ImportMode::Accumulate).value()); } // We accessed 0 and 1 above, but not 2. Now that StatMerger has been destroyed, // 2 should be gone. @@ -388,7 +389,7 @@ TEST_F(StatMergerThreadLocalTest, NewStatFromParent) { // from the parent, that we retain the import-mode, accumulating the updated // value. https://github.com/envoyproxy/envoy/issues/7227 TEST_F(StatMergerThreadLocalTest, RetainImportModeAfterMerge) { - Gauge& gauge = store_.gauge("mygauge", Gauge::ImportMode::Accumulate); + Gauge& gauge = store_.gaugeFromString("mygauge", Gauge::ImportMode::Accumulate); gauge.set(42); EXPECT_EQ(Gauge::ImportMode::Accumulate, gauge.importMode()); EXPECT_EQ(42, gauge.value()); @@ -407,7 +408,7 @@ TEST_F(StatMergerThreadLocalTest, RetainImportModeAfterMerge) { // from the parent, that we retain the import-mode, and don't accumulate the updated // value. 
https://github.com/envoyproxy/envoy/issues/7227 TEST_F(StatMergerThreadLocalTest, RetainNeverImportModeAfterMerge) { - Gauge& gauge = store_.gauge("mygauge", Gauge::ImportMode::NeverImport); + Gauge& gauge = store_.gaugeFromString("mygauge", Gauge::ImportMode::NeverImport); gauge.set(42); EXPECT_EQ(Gauge::ImportMode::NeverImport, gauge.importMode()); EXPECT_EQ(42, gauge.value()); diff --git a/test/common/stats/stat_test_utility.cc b/test/common/stats/stat_test_utility.cc index b32bd96b446a..a195614e5682 100644 --- a/test/common/stats/stat_test_utility.cc +++ b/test/common/stats/stat_test_utility.cc @@ -135,10 +135,10 @@ MemoryTest::Mode MemoryTest::mode() { #endif } -Counter& TestStore::counter(const std::string& name) { +Counter& TestStore::counterFromString(const std::string& name) { Counter*& counter_ref = counter_map_[name]; if (counter_ref == nullptr) { - counter_ref = &IsolatedStoreImpl::counter(name); + counter_ref = &IsolatedStoreImpl::counterFromString(name); } return *counter_ref; } @@ -157,10 +157,10 @@ Counter& TestStore::counterFromStatNameWithTags(const StatName& stat_name, return *counter_ref; } -Gauge& TestStore::gauge(const std::string& name, Gauge::ImportMode mode) { +Gauge& TestStore::gaugeFromString(const std::string& name, Gauge::ImportMode mode) { Gauge*& gauge_ref = gauge_map_[name]; if (gauge_ref == nullptr) { - gauge_ref = &IsolatedStoreImpl::gauge(name, mode); + gauge_ref = &IsolatedStoreImpl::gaugeFromString(name, mode); } return *gauge_ref; } @@ -178,10 +178,10 @@ Gauge& TestStore::gaugeFromStatNameWithTags(const StatName& stat_name, return *gauge_ref; } -Histogram& TestStore::histogram(const std::string& name, Histogram::Unit unit) { +Histogram& TestStore::histogramFromString(const std::string& name, Histogram::Unit unit) { Histogram*& histogram_ref = histogram_map_[name]; if (histogram_ref == nullptr) { - histogram_ref = &IsolatedStoreImpl::histogram(name, unit); + histogram_ref = &IsolatedStoreImpl::histogramFromString(name, unit); } 
return *histogram_ref; } diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index 22dcbfea5c05..b0ccd31763b5 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -90,20 +90,28 @@ class MemoryTest { // and symbol strings as production. class TestStore : public IsolatedStoreImpl { public: - TestStore() {} + TestStore() = default; // Constructs a store using a symbol table, allowing for explicit sharing. explicit TestStore(SymbolTable& symbol_table) : IsolatedStoreImpl(symbol_table) {} + Counter& counter(const std::string& name) { return counterFromString(name); } + Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) { + return gaugeFromString(name, import_mode); + } + Histogram& histogram(const std::string& name, Histogram::Unit unit) { + return histogramFromString(name, unit); + } + // Override the Stats::Store methods for name-based lookup of stats, to use // and update the string-maps in this class. Note that IsolatedStoreImpl // does not support deletion of stats, so we only have to track additions // to keep the maps up-to-date. 
// // Stats::Scope - Counter& counter(const std::string& name) override; - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override; - Histogram& histogram(const std::string& name, Histogram::Unit unit) override; + Counter& counterFromString(const std::string& name) override; + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override; + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override; Counter& counterFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags) override; Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags, diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 502175756140..8473222d2cfd 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -185,8 +185,8 @@ class HistogramTest : public testing::Test { TEST_F(StatsThreadLocalStoreTest, NoTls) { InSequence s; - Counter& c1 = store_->counter("c1"); - EXPECT_EQ(&c1, &store_->counter("c1")); + Counter& c1 = store_->counterFromString("c1"); + EXPECT_EQ(&c1, &store_->counterFromString("c1")); StatNameManagedStorage c1_name("c1", *symbol_table_); c1.add(100); auto found_counter = store_->findCounter(c1_name.statName()); @@ -196,8 +196,8 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { c1.add(100); EXPECT_EQ(200, found_counter->get().value()); - Gauge& g1 = store_->gauge("g1", Gauge::ImportMode::Accumulate); - EXPECT_EQ(&g1, &store_->gauge("g1", Gauge::ImportMode::Accumulate)); + Gauge& g1 = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); + EXPECT_EQ(&g1, &store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate)); StatNameManagedStorage g1_name("g1", *symbol_table_); g1.set(100); auto found_gauge = store_->findGauge(g1_name.statName()); @@ -207,8 +207,8 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { g1.set(0); EXPECT_EQ(0, 
found_gauge->get().value()); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - EXPECT_EQ(&h1, &store_->histogram("h1", Stats::Histogram::Unit::Unspecified)); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + EXPECT_EQ(&h1, &store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified)); StatNameManagedStorage h1_name("h1", *symbol_table_); auto found_histogram = store_->findHistogram(h1_name.statName()); ASSERT_TRUE(found_histogram.has_value()); @@ -233,8 +233,8 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - Counter& c1 = store_->counter("c1"); - EXPECT_EQ(&c1, &store_->counter("c1")); + Counter& c1 = store_->counterFromString("c1"); + EXPECT_EQ(&c1, &store_->counterFromString("c1")); StatNameManagedStorage c1_name("c1", *symbol_table_); c1.add(100); auto found_counter = store_->findCounter(c1_name.statName()); @@ -244,8 +244,8 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { c1.add(100); EXPECT_EQ(200, found_counter->get().value()); - Gauge& g1 = store_->gauge("g1", Gauge::ImportMode::Accumulate); - EXPECT_EQ(&g1, &store_->gauge("g1", Gauge::ImportMode::Accumulate)); + Gauge& g1 = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); + EXPECT_EQ(&g1, &store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate)); StatNameManagedStorage g1_name("g1", *symbol_table_); g1.set(100); auto found_gauge = store_->findGauge(g1_name.statName()); @@ -255,8 +255,8 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { g1.set(0); EXPECT_EQ(0, found_gauge->get().value()); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - EXPECT_EQ(&h1, &store_->histogram("h1", Stats::Histogram::Unit::Unspecified)); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + EXPECT_EQ(&h1, &store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified)); StatNameManagedStorage 
h1_name("h1", *symbol_table_); auto found_histogram = store_->findHistogram(h1_name.statName()); ASSERT_TRUE(found_histogram.has_value()); @@ -285,8 +285,8 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - Counter& c1 = store_->counter("c1"); - Counter& c2 = scope1->counter("c2"); + Counter& c1 = store_->counterFromString("c1"); + Counter& c2 = scope1->counterFromString("c2"); EXPECT_EQ("c1", c1.name()); EXPECT_EQ("scope1.c2", c2.name()); StatNameManagedStorage c1_name("c1", *symbol_table_); @@ -298,8 +298,8 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { ASSERT_TRUE(found_counter2.has_value()); EXPECT_EQ(&c2, &found_counter2->get()); - Gauge& g1 = store_->gauge("g1", Gauge::ImportMode::Accumulate); - Gauge& g2 = scope1->gauge("g2", Gauge::ImportMode::Accumulate); + Gauge& g1 = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); + Gauge& g2 = scope1->gaugeFromString("g2", Gauge::ImportMode::Accumulate); EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); StatNameManagedStorage g1_name("g1", *symbol_table_); @@ -311,8 +311,8 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { ASSERT_TRUE(found_gauge2.has_value()); EXPECT_EQ(&g2, &found_gauge2->get()); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Histogram& h2 = scope1->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = scope1->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("scope1.h2", h2.name()); EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100)); @@ -370,7 +370,7 @@ TEST_F(StatsThreadLocalStoreTest, SanitizePrefix) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope(std::string("scope1:\0:foo.", 13)); - Counter& c1 = 
scope1->counter("c1"); + Counter& c1 = scope1->counterFromString("c1"); EXPECT_EQ("scope1___foo.c1", c1.name()); store_->shutdownThreading(); @@ -390,7 +390,7 @@ TEST_F(StatsThreadLocalStoreTest, ScopeDelete) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - scope1->counter("c1"); + scope1->counterFromString("c1"); EXPECT_EQ(1UL, store_->counters().size()); CounterSharedPtr c1 = TestUtility::findCounter(*store_, "scope1.c1"); EXPECT_EQ("scope1.c1", c1->name()); @@ -412,7 +412,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - Counter& c1 = scope1->counter("foo.bar"); + Counter& c1 = scope1->counterFromString("foo.bar"); EXPECT_EQ("scope1.foo.bar", c1.name()); StatNameManagedStorage c1_name("scope1.foo.bar", *symbol_table_); auto found_counter = store_->findCounter(c1_name.statName()); @@ -420,7 +420,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { EXPECT_EQ(&c1, &found_counter->get()); ScopePtr scope2 = scope1->createScope("foo."); - Counter& c2 = scope2->counter("bar"); + Counter& c2 = scope2->counterFromString("bar"); EXPECT_EQ(&c1, &c2); EXPECT_EQ("scope1.foo.bar", c2.name()); StatNameManagedStorage c2_name("scope1.foo.bar", *symbol_table_); @@ -432,7 +432,7 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { EXPECT_EQ(1UL, c1.value()); EXPECT_EQ(c1.value(), c2.value()); - Gauge& g1 = scope2->gauge("some_gauge", Gauge::ImportMode::Accumulate); + Gauge& g1 = scope2->gaugeFromString("some_gauge", Gauge::ImportMode::Accumulate); EXPECT_EQ("scope1.foo.some_gauge", g1.name()); store_->shutdownThreading(); @@ -449,8 +449,8 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { ScopePtr scope2 = store_->createScope("scope1."); // We will call alloc twice, but they should point to the same backing storage. 
- Counter& c1 = scope1->counter("c"); - Counter& c2 = scope2->counter("c"); + Counter& c1 = scope1->counterFromString("c"); + Counter& c2 = scope2->counterFromString("c"); EXPECT_EQ(&c1, &c2); c1.inc(); EXPECT_EQ(1UL, c1.value()); @@ -463,8 +463,8 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { EXPECT_EQ(1UL, store_->counters().size()); // Gauges should work the same way. - Gauge& g1 = scope1->gauge("g", Gauge::ImportMode::Accumulate); - Gauge& g2 = scope2->gauge("g", Gauge::ImportMode::Accumulate); + Gauge& g1 = scope1->gaugeFromString("g", Gauge::ImportMode::Accumulate); + Gauge& g2 = scope2->gaugeFromString("g", Gauge::ImportMode::Accumulate); EXPECT_EQ(&g1, &g2); g1.set(5); EXPECT_EQ(5UL, g1.value()); @@ -547,7 +547,7 @@ TEST_F(LookupWithStatNameTest, All) { // Validate that we sanitize away bad characters in the stats prefix. ScopePtr scope3 = scope1->createScope(std::string("foo:\0:.", 7)); - EXPECT_EQ("scope1.foo___.bar", scope3->counter("bar").name()); + EXPECT_EQ("scope1.foo___.bar", scope3->counterFromString("bar").name()); EXPECT_EQ(4UL, store_->counters().size()); EXPECT_EQ(2UL, store_->gauges().size()); @@ -575,7 +575,7 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { // Testing No-op counters, gauges, histograms which match the prefix "noop". // Counter - Counter& noop_counter = store_->counter("noop_counter"); + Counter& noop_counter = store_->counterFromString("noop_counter"); EXPECT_EQ(noop_counter.name(), ""); EXPECT_EQ(noop_counter.value(), 0); noop_counter.add(1); @@ -584,14 +584,14 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { EXPECT_EQ(noop_counter.value(), 0); noop_counter.reset(); EXPECT_EQ(noop_counter.value(), 0); - Counter& noop_counter_2 = store_->counter("noop_counter_2"); + Counter& noop_counter_2 = store_->counterFromString("noop_counter_2"); EXPECT_EQ(&noop_counter, &noop_counter_2); EXPECT_FALSE(noop_counter.used()); // hardcoded to return false in NullMetricImpl. 
EXPECT_EQ(0, noop_counter.latch()); // hardcoded to 0. EXPECT_EQ(0, noop_counter.use_count()); // null counter is contained in ThreadLocalStoreImpl. // Gauge - Gauge& noop_gauge = store_->gauge("noop_gauge", Gauge::ImportMode::Accumulate); + Gauge& noop_gauge = store_->gaugeFromString("noop_gauge", Gauge::ImportMode::Accumulate); EXPECT_EQ(noop_gauge.name(), ""); EXPECT_EQ(noop_gauge.value(), 0); noop_gauge.add(1); @@ -608,16 +608,16 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { EXPECT_FALSE(noop_gauge.used()); // null gauge is contained in ThreadLocalStoreImpl. EXPECT_EQ(0, noop_gauge.use_count()); // null gauge is contained in ThreadLocalStoreImpl. - Gauge& noop_gauge_2 = store_->gauge("noop_gauge_2", Gauge::ImportMode::Accumulate); + Gauge& noop_gauge_2 = store_->gaugeFromString("noop_gauge_2", Gauge::ImportMode::Accumulate); EXPECT_EQ(&noop_gauge, &noop_gauge_2); // Histogram Histogram& noop_histogram = - store_->histogram("noop_histogram", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("noop_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(noop_histogram.name(), ""); EXPECT_FALSE(noop_histogram.used()); Histogram& noop_histogram_2 = - store_->histogram("noop_histogram_2", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("noop_histogram_2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(&noop_histogram, &noop_histogram_2); store_->shutdownThreading(); @@ -638,23 +638,25 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { store_->setStatsMatcher(std::make_unique(stats_config_)); // The creation of counters/gauges/histograms which have no uppercase letters should succeed. 
- Counter& lowercase_counter = store_->counter("lowercase_counter"); + Counter& lowercase_counter = store_->counterFromString("lowercase_counter"); EXPECT_EQ(lowercase_counter.name(), "lowercase_counter"); - Gauge& lowercase_gauge = store_->gauge("lowercase_gauge", Gauge::ImportMode::Accumulate); + Gauge& lowercase_gauge = + store_->gaugeFromString("lowercase_gauge", Gauge::ImportMode::Accumulate); EXPECT_EQ(lowercase_gauge.name(), "lowercase_gauge"); Histogram& lowercase_histogram = - store_->histogram("lowercase_histogram", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("lowercase_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(lowercase_histogram.name(), "lowercase_histogram"); // And the creation of counters/gauges/histograms which have uppercase letters should fail. - Counter& uppercase_counter = store_->counter("UPPERCASE_counter"); + Counter& uppercase_counter = store_->counterFromString("UPPERCASE_counter"); EXPECT_EQ(uppercase_counter.name(), ""); uppercase_counter.inc(); EXPECT_EQ(uppercase_counter.value(), 0); uppercase_counter.inc(); EXPECT_EQ(uppercase_counter.value(), 0); - Gauge& uppercase_gauge = store_->gauge("uppercase_GAUGE", Gauge::ImportMode::Accumulate); + Gauge& uppercase_gauge = + store_->gaugeFromString("uppercase_GAUGE", Gauge::ImportMode::Accumulate); EXPECT_EQ(uppercase_gauge.name(), ""); uppercase_gauge.inc(); EXPECT_EQ(uppercase_gauge.value(), 0); @@ -664,7 +666,7 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { // Histograms are harder to query and test, so we resort to testing that name() returns the empty // string. 
Histogram& uppercase_histogram = - store_->histogram("upperCASE_histogram", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("upperCASE_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(uppercase_histogram.name(), ""); // Adding another exclusion rule -- now we reject not just uppercase stats but those starting with @@ -673,42 +675,43 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { "invalid"); store_->setStatsMatcher(std::make_unique(stats_config_)); - Counter& valid_counter = store_->counter("valid_counter"); + Counter& valid_counter = store_->counterFromString("valid_counter"); valid_counter.inc(); EXPECT_EQ(valid_counter.value(), 1); - Counter& invalid_counter = store_->counter("invalid_counter"); + Counter& invalid_counter = store_->counterFromString("invalid_counter"); invalid_counter.inc(); EXPECT_EQ(invalid_counter.value(), 0); // But the old exclusion rule still holds. - Counter& invalid_counter_2 = store_->counter("also_INVALID_counter"); + Counter& invalid_counter_2 = store_->counterFromString("also_INVALID_counter"); invalid_counter_2.inc(); EXPECT_EQ(invalid_counter_2.value(), 0); // And we expect the same behavior from gauges and histograms. 
- Gauge& valid_gauge = store_->gauge("valid_gauge", Gauge::ImportMode::Accumulate); + Gauge& valid_gauge = store_->gaugeFromString("valid_gauge", Gauge::ImportMode::Accumulate); valid_gauge.set(2); EXPECT_EQ(valid_gauge.value(), 2); - Gauge& invalid_gauge_1 = store_->gauge("invalid_gauge", Gauge::ImportMode::Accumulate); + Gauge& invalid_gauge_1 = store_->gaugeFromString("invalid_gauge", Gauge::ImportMode::Accumulate); invalid_gauge_1.inc(); EXPECT_EQ(invalid_gauge_1.value(), 0); - Gauge& invalid_gauge_2 = store_->gauge("also_INVALID_gauge", Gauge::ImportMode::Accumulate); + Gauge& invalid_gauge_2 = + store_->gaugeFromString("also_INVALID_gauge", Gauge::ImportMode::Accumulate); invalid_gauge_2.inc(); EXPECT_EQ(invalid_gauge_2.value(), 0); Histogram& valid_histogram = - store_->histogram("valid_histogram", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("valid_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(valid_histogram.name(), "valid_histogram"); Histogram& invalid_histogram_1 = - store_->histogram("invalid_histogram", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("invalid_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(invalid_histogram_1.name(), ""); Histogram& invalid_histogram_2 = - store_->histogram("also_INVALID_histogram", Stats::Histogram::Unit::Unspecified); + store_->histogramFromString("also_INVALID_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(invalid_histogram_2.name(), ""); // Expected to free lowercase_counter, lowercase_gauge, valid_counter, valid_gauge @@ -789,13 +792,13 @@ class RememberStatsMatcherTest : public testing::TestWithParam { LookupStatFn lookupCounterFn() { return [this](const std::string& stat_name) -> std::string { - return scope_->counter(stat_name).name(); + return scope_->counterFromString(stat_name).name(); }; } LookupStatFn lookupGaugeFn() { return [this](const std::string& stat_name) -> std::string { - return scope_->gauge(stat_name, 
Gauge::ImportMode::Accumulate).name(); + return scope_->gaugeFromString(stat_name, Gauge::ImportMode::Accumulate).name(); }; } @@ -812,7 +815,7 @@ class RememberStatsMatcherTest : public testing::TestWithParam { LookupStatFn lookupHistogramFn() { return [this](const std::string& stat_name) -> std::string { - return scope_->histogram(stat_name, Stats::Histogram::Unit::Unspecified).name(); + return scope_->histogramFromString(stat_name, Stats::Histogram::Unit::Unspecified).name(); }; } @@ -859,9 +862,9 @@ TEST_P(RememberStatsMatcherTest, HistogramAcceptsAll) { testAcceptsAll(lookupHis TEST_F(StatsThreadLocalStoreTest, RemoveRejectedStats) { store_->initializeThreading(main_thread_dispatcher_, tls_); - Counter& counter = store_->counter("c1"); - Gauge& gauge = store_->gauge("g1", Gauge::ImportMode::Accumulate); - Histogram& histogram = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); + Counter& counter = store_->counterFromString("c1"); + Gauge& gauge = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); + Histogram& histogram = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); ASSERT_EQ(1, store_->counters().size()); // "c1". EXPECT_TRUE(&counter == store_->counters()[0].get() || &counter == store_->counters()[1].get()); // counters() order is non-deterministic. @@ -897,7 +900,7 @@ TEST_F(StatsThreadLocalStoreTest, NonHotRestartNoTruncation) { // Allocate a stat greater than the max name length. const std::string name_1(MaxStatNameLength + 1, 'A'); - store_->counter(name_1); + store_->counterFromString(name_1); // This works fine, and we can find it by its long name because heap-stats do not // get truncated. 
@@ -949,7 +952,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsFakeSymbolTable) { init(true); TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counter(std::string(name)); }); + 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1358576); // Jan 23, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 1.4 * million_); } @@ -959,7 +962,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsFakeSymbolTable) { initThreading(); TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counter(std::string(name)); }); + 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1498128); // Jan 23, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 1.6 * million_); } @@ -969,7 +972,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsRealSymbolTable) { init(false); TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counter(std::string(name)); }); + 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 689648); // Jan 23, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.75 * million_); } @@ -979,7 +982,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsRealSymbolTable) { initThreading(); TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( - 100, [this](absl::string_view name) { store_->counter(std::string(name)); }); + 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 829200); // Jan 23, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.9 * million_); } @@ -988,11 +991,11 @@ TEST_F(StatsThreadLocalStoreTest, ShuttingDown) { 
InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - store_->counter("c1"); - store_->gauge("g1", Gauge::ImportMode::Accumulate); + store_->counterFromString("c1"); + store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); store_->shutdownThreading(); - store_->counter("c2"); - store_->gauge("g2", Gauge::ImportMode::Accumulate); + store_->counterFromString("c2"); + store_->gaugeFromString("g2", Gauge::ImportMode::Accumulate); // We do not keep ref-counts for counters and gauges in the TLS cache, so // all these stats should have a ref-count of 2: one for the SharedPtr @@ -1010,7 +1013,7 @@ TEST_F(StatsThreadLocalStoreTest, MergeDuringShutDown) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 1)); @@ -1042,7 +1045,7 @@ TEST(ThreadLocalStoreThreadTest, ConstructDestruct) { // Histogram tests TEST_F(HistogramTest, BasicSingleHistogramMerge) { - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); expectCallAndAccumulate(h1, 0); @@ -1058,8 +1061,8 @@ TEST_F(HistogramTest, BasicSingleHistogramMerge) { } TEST_F(HistogramTest, BasicMultiHistogramMerge) { - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Histogram& h2 = store_->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("h2", h2.name()); @@ -1071,8 +1074,8 @@ TEST_F(HistogramTest, BasicMultiHistogramMerge) { } 
TEST_F(HistogramTest, MultiHistogramMultipleMerges) { - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Histogram& h2 = store_->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("h2", h2.name()); @@ -1102,8 +1105,8 @@ TEST_F(HistogramTest, MultiHistogramMultipleMerges) { TEST_F(HistogramTest, BasicScopeHistogramMerge) { ScopePtr scope1 = store_->createScope("scope1."); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Histogram& h2 = scope1->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = scope1->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("scope1.h2", h2.name()); @@ -1113,8 +1116,8 @@ TEST_F(HistogramTest, BasicScopeHistogramMerge) { } TEST_F(HistogramTest, BasicHistogramSummaryValidate) { - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Histogram& h2 = store_->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); expectCallAndAccumulate(h1, 1); @@ -1153,7 +1156,7 @@ TEST_F(HistogramTest, BasicHistogramSummaryValidate) { // Validates the summary after known value merge in to same histogram. 
TEST_F(HistogramTest, BasicHistogramMergeSummary) { - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); for (size_t i = 0; i < 50; ++i) { expectCallAndAccumulate(h1, i); @@ -1181,8 +1184,8 @@ TEST_F(HistogramTest, BasicHistogramMergeSummary) { TEST_F(HistogramTest, BasicHistogramUsed) { ScopePtr scope1 = store_->createScope("scope1."); - Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Histogram& h2 = scope1->histogram("h2", Stats::Histogram::Unit::Unspecified); + Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Histogram& h2 = scope1->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("scope1.h2", h2.name()); @@ -1211,7 +1214,8 @@ TEST_F(HistogramTest, BasicHistogramUsed) { TEST_F(HistogramTest, ParentHistogramBucketSummary) { ScopePtr scope1 = store_->createScope("scope1."); - Histogram& histogram = store_->histogram("histogram", Stats::Histogram::Unit::Unspecified); + Histogram& histogram = + store_->histogramFromString("histogram", Stats::Histogram::Unit::Unspecified); store_->mergeHistograms([]() -> void {}); ASSERT_EQ(1, store_->histograms().size()); ParentHistogramSharedPtr parent_histogram = store_->histograms()[0]; @@ -1296,7 +1300,7 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB } } - ~ClusterShutdownCleanupStarvationTest() { + ~ClusterShutdownCleanupStarvationTest() override { { BlockingBarrier blocking_barrier(1); main_dispatcher_->post(blocking_barrier.run([this]() { diff --git a/test/common/stream_info/BUILD b/test/common/stream_info/BUILD index 9c60f05c1c2b..de97bc18ddea 100644 --- a/test/common/stream_info/BUILD +++ b/test/common/stream_info/BUILD @@ -43,8 +43,11 @@ envoy_cc_test_library( name = "test_util", hdrs = ["test_util.h"], deps = [ + 
"//include/envoy/http:request_id_extension_interface", "//include/envoy/stream_info:stream_info_interface", "//source/common/common:assert_lib", + "//source/common/http:request_id_extension_lib", + "//source/common/runtime:runtime_lib", "//source/common/stream_info:filter_state_lib", "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index e51947352282..19d86a4d98c4 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -237,6 +237,24 @@ TEST_F(StreamInfoImplTest, RequestHeadersTest) { EXPECT_EQ(&headers, stream_info.getRequestHeaders()); } +TEST_F(StreamInfoImplTest, DefaultRequestIDExtensionTest) { + StreamInfoImpl stream_info(test_time_.timeSystem()); + EXPECT_TRUE(stream_info.getRequestIDExtension()); + + auto rid_extension = stream_info.getRequestIDExtension(); + + Http::RequestHeaderMapImpl request_headers; + Http::ResponseHeaderMapImpl response_headers; + rid_extension->set(request_headers, false); + rid_extension->set(request_headers, true); + rid_extension->setInResponse(response_headers, request_headers); + uint64_t out = 123; + EXPECT_FALSE(rid_extension->modBy(request_headers, out, 10000)); + EXPECT_EQ(out, 123); + rid_extension->setTraceStatus(request_headers, Http::TraceStatus::Forced); + EXPECT_EQ(rid_extension->getTraceStatus(request_headers), Http::TraceStatus::NoTrace); +} + } // namespace } // namespace StreamInfo } // namespace Envoy diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index 086f31666152..b141abeb0c2e 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -4,6 +4,8 @@ #include "envoy/stream_info/stream_info.h" #include "common/common/assert.h" +#include "common/http/request_id_extension_impl.h" +#include "common/runtime/runtime_impl.h" 
#include "common/stream_info/filter_state_impl.h" #include "test/test_common/simulated_time_system.h" @@ -18,6 +20,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { // Use 1999-01-01 00:00:00 +0 time_t fake_time = 915148800; start_time_ = std::chrono::system_clock::from_time_t(fake_time); + request_id_extension_ = Http::RequestIDExtensionFactory::defaultInstance(random_); MonotonicTime now = timeSystem().monotonicTime(); start_time_monotonic_ = now; @@ -207,6 +210,13 @@ class TestStreamInfo : public StreamInfo::StreamInfo { const Http::RequestHeaderMap* getRequestHeaders() const override { return request_headers_; } + void setRequestIDExtension(Http::RequestIDExtensionSharedPtr request_id_extension) override { + request_id_extension_ = request_id_extension; + } + Http::RequestIDExtensionSharedPtr getRequestIDExtension() const override { + return request_id_extension_; + } + Event::TimeSystem& timeSystem() { return test_time_.timeSystem(); } void setUpstreamClusterInfo( @@ -217,6 +227,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { return upstream_cluster_info_; } + Runtime::RandomGeneratorImpl random_; SystemTime start_time_; MonotonicTime start_time_monotonic_; @@ -254,6 +265,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { const Http::RequestHeaderMap* request_headers_{}; Envoy::Event::SimulatedTimeSystem test_time_; absl::optional upstream_cluster_info_{}; + Http::RequestIDExtensionSharedPtr request_id_extension_; }; } // namespace Envoy diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 3eb47af7b6a2..32e0cf5c033e 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -69,11 +69,9 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { */ class ConnPoolImplForTest : public ConnPoolImpl { public: - ConnPoolImplForTest(Event::MockDispatcher& dispatcher, - Upstream::ClusterInfoConstSharedPtr cluster, + ConnPoolImplForTest(Event::MockDispatcher& 
dispatcher, Upstream::HostSharedPtr host, NiceMock* upstream_ready_timer) - : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, nullptr), + : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr), mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {} ~ConnPoolImplForTest() override { @@ -153,7 +151,8 @@ class TcpConnPoolImplTest : public testing::Test { public: TcpConnPoolImplTest() : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} + host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000")), + conn_pool_(dispatcher_, host_, upstream_ready_timer_) {} ~TcpConnPoolImplTest() override { EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); @@ -162,6 +161,7 @@ class TcpConnPoolImplTest : public testing::Test { NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; NiceMock* upstream_ready_timer_; + Upstream::HostSharedPtr host_; ConnPoolImplForTest conn_pool_; NiceMock runtime_; }; @@ -264,6 +264,8 @@ struct ActiveTestConn { bool completed_{}; }; +TEST_F(TcpConnPoolImplTest, HostAccessor) { EXPECT_EQ(conn_pool_.host(), host_); } + /** * Verify that connections are drained when requested. 
*/ diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index cfc8159980a5..34a92a7798cb 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -39,7 +39,6 @@ using testing::_; using testing::Invoke; using testing::InvokeWithoutArgs; -using testing::MatchesRegex; using testing::NiceMock; using testing::Return; using testing::ReturnPointee; @@ -841,7 +840,7 @@ class TcpProxyTest : public testing::Test { } void configure(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy& config) { - config_.reset(new Config(config, factory_context_)); + config_ = std::make_shared(config, factory_context_); } envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy defaultConfig() { @@ -914,6 +913,12 @@ class TcpProxyTest : public testing::Test { EXPECT_CALL(filter_callbacks_.connection_, enableHalfClose(true)); EXPECT_CALL(filter_callbacks_.connection_, readDisable(true)); filter_->initializeReadFilterCallbacks(filter_callbacks_); + filter_callbacks_.connection_.streamInfo().setDownstreamSslConnection( + filter_callbacks_.connection_.ssl()); + filter_callbacks_.connection_.streamInfo().setDownstreamLocalAddress( + filter_callbacks_.connection_.localAddress()); + filter_callbacks_.connection_.streamInfo().setDownstreamRemoteAddress( + filter_callbacks_.connection_.remoteAddress()); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); EXPECT_EQ(absl::optional(), filter_->computeHashKey()); @@ -1299,7 +1304,7 @@ TEST_F(TcpProxyTest, WeightedClusterWithMetadataMatch) { k0: v0 )EOF"; - config_.reset(new Config(constructConfigFromYaml(yaml, factory_context_))); + config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); ProtobufWkt::Value v0, v1, v2; v0.set_string_value("v0"); @@ -1586,34 +1591,6 @@ TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogDownstreamAddress)) { EXPECT_EQ(access_log_data_, "1.1.1.1 1.1.1.2:20000"); } -// Test 
that access log fields %BYTES_RECEIVED%, %BYTES_SENT%, %START_TIME%, %DURATION% are -// all correctly logged. -TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogBytesRxTxDuration)) { - setup(1, accessLogConfig("bytesreceived=%BYTES_RECEIVED% bytessent=%BYTES_SENT% " - "datetime=%START_TIME% nonzeronum=%DURATION%")); - - raiseEventUpstreamConnected(0); - Buffer::OwnedImpl buffer("a"); - filter_->onData(buffer, false); - Buffer::OwnedImpl response("bb"); - upstream_callbacks_->onUpstreamData(response, false); - - timeSystem().sleep(std::chrono::milliseconds(1)); - upstream_callbacks_->onEvent(Network::ConnectionEvent::RemoteClose); - filter_.reset(); - -#ifndef GTEST_USES_SIMPLE_RE - EXPECT_THAT(access_log_data_, - MatchesRegex( - "bytesreceived=1 bytessent=2 datetime=[0-9-]+T[0-9:.]+Z nonzeronum=[1-9][0-9]*")); -#else - EXPECT_THAT(access_log_data_, - MatchesRegex("bytesreceived=1 bytessent=2 " - "datetime=\\d+-\\d+-\\d+T\\d+:\\d+:\\d+\\.\\d+Z nonzeronum=\\d+")); - EXPECT_THAT(access_log_data_, Not(MatchesRegex("nonzeronum=0"))); -#endif -} - TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(AccessLogUpstreamSSLConnection)) { setup(1); @@ -1783,7 +1760,7 @@ class TcpProxyRoutingTest : public testing::Test { cluster: fake_cluster )EOF"; - config_.reset(new Config(constructConfigFromYaml(yaml, factory_context_))); + config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); } void initializeFilter() { @@ -1937,7 +1914,7 @@ class TcpProxyNonDeprecatedConfigRoutingTest : public TcpProxyRoutingTest { cluster: fake_cluster )EOF"; - config_.reset(new Config(constructConfigFromYaml(yaml, factory_context_))); + config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); } }; @@ -1978,7 +1955,7 @@ class TcpProxyHashingTest : public testing::Test { - source_ip: {} )EOF"; - config_.reset(new Config(constructConfigFromYaml(yaml, factory_context_))); + config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); } void 
initializeFilter() { diff --git a/test/common/tcp_proxy/upstream_test.cc b/test/common/tcp_proxy/upstream_test.cc index 6435c687410b..9464d5d25970 100644 --- a/test/common/tcp_proxy/upstream_test.cc +++ b/test/common/tcp_proxy/upstream_test.cc @@ -1,3 +1,5 @@ +#include + #include "common/tcp_proxy/upstream.h" #include "test/mocks/buffer/mocks.h" @@ -19,7 +21,7 @@ class HttpUpstreamTest : public testing::Test { HttpUpstreamTest() { EXPECT_CALL(encoder_, getStream()).Times(AnyNumber()); EXPECT_CALL(encoder_, encodeHeaders(_, false)); - upstream_.reset(new HttpUpstream(callbacks_, hostname_)); + upstream_ = std::make_unique(callbacks_, hostname_); upstream_->setRequestEncoder(encoder_, true); } @@ -39,7 +41,7 @@ TEST_F(HttpUpstreamTest, WriteUpstream) { upstream_->encodeData(buffer2, true); // New upstream with no encoder - upstream_.reset(new HttpUpstream(callbacks_, hostname_)); + upstream_ = std::make_unique(callbacks_, hostname_); upstream_->encodeData(buffer2, true); } @@ -73,7 +75,7 @@ TEST_F(HttpUpstreamTest, ReadDisable) { EXPECT_TRUE(upstream_->readDisable(false)); // New upstream with no encoder - upstream_.reset(new HttpUpstream(callbacks_, hostname_)); + upstream_ = std::make_unique(callbacks_, hostname_); EXPECT_FALSE(upstream_->readDisable(true)); } diff --git a/test/common/tracing/BUILD b/test/common/tracing/BUILD index 3760ef373406..1ab8faf7c58e 100644 --- a/test/common/tracing/BUILD +++ b/test/common/tracing/BUILD @@ -14,12 +14,13 @@ envoy_cc_test( "http_tracer_impl_test.cc", ], deps = [ + "//include/envoy/http:request_id_extension_interface", "//source/common/common:base64_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/http:message_lib", + "//source/common/http:request_id_extension_lib", "//source/common/runtime:runtime_lib", - "//source/common/runtime:uuid_util_lib", "//source/common/tracing:http_tracer_lib", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", diff --git 
a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index c740de515771..3d05232cb56f 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -3,15 +3,16 @@ #include #include "envoy/config/core/v3/base.pb.h" +#include "envoy/http/request_id_extension.h" #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "common/common/base64.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/message_impl.h" +#include "common/http/request_id_extension_impl.h" #include "common/network/utility.h" #include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" #include "common/tracing/http_tracer_impl.h" #include "test/mocks/http/mocks.h" @@ -46,17 +47,20 @@ TEST(HttpTracerUtilityTest, IsTracing) { Runtime::RandomGeneratorImpl random; std::string not_traceable_guid = random.uuid(); + auto rid_extension = Http::RequestIDExtensionFactory::defaultInstance(random); + ON_CALL(stream_info, getRequestIDExtension()).WillByDefault(Return(rid_extension)); + std::string forced_guid = random.uuid(); - UuidUtils::setTraceableUuid(forced_guid, UuidTraceStatus::Forced); Http::TestRequestHeaderMapImpl forced_header{{"x-request-id", forced_guid}}; + rid_extension->setTraceStatus(forced_header, Http::TraceStatus::Forced); std::string sampled_guid = random.uuid(); - UuidUtils::setTraceableUuid(sampled_guid, UuidTraceStatus::Sampled); Http::TestRequestHeaderMapImpl sampled_header{{"x-request-id", sampled_guid}}; + rid_extension->setTraceStatus(sampled_header, Http::TraceStatus::Sampled); std::string client_guid = random.uuid(); - UuidUtils::setTraceableUuid(client_guid, UuidTraceStatus::Client); Http::TestRequestHeaderMapImpl client_header{{"x-request-id", client_guid}}; + rid_extension->setTraceStatus(client_header, Http::TraceStatus::Client); Http::TestRequestHeaderMapImpl not_traceable_header{{"x-request-id", not_traceable_guid}}; 
Http::TestRequestHeaderMapImpl empty_header{}; @@ -715,7 +719,7 @@ class HttpTracerImplTest : public testing::Test { HttpTracerImplTest() { driver_ = new MockDriver(); DriverPtr driver_ptr(driver_); - tracer_ = std::make_unique(std::move(driver_ptr), local_info_); + tracer_ = std::make_shared(std::move(driver_ptr), local_info_); } Http::TestRequestHeaderMapImpl request_headers_{ @@ -726,7 +730,7 @@ class HttpTracerImplTest : public testing::Test { NiceMock local_info_; MockConfig config_; MockDriver* driver_; - HttpTracerPtr tracer_; + HttpTracerSharedPtr tracer_; }; TEST_F(HttpTracerImplTest, BasicFunctionalityNullSpan) { diff --git a/test/common/tracing/http_tracer_manager_impl_test.cc b/test/common/tracing/http_tracer_manager_impl_test.cc index bbddb563033d..048cdd17b6d7 100644 --- a/test/common/tracing/http_tracer_manager_impl_test.cc +++ b/test/common/tracing/http_tracer_manager_impl_test.cc @@ -26,9 +26,10 @@ class SampleTracer : public HttpTracer { class SampleTracerFactory : public Server::Configuration::TracerFactory { public: - Tracing::HttpTracerPtr createHttpTracer(const Protobuf::Message&, - Server::Configuration::TracerFactoryContext&) override { - return std::make_unique(); + Tracing::HttpTracerSharedPtr + createHttpTracer(const Protobuf::Message&, + Server::Configuration::TracerFactoryContext&) override { + return std::make_shared(); } std::string name() const override { return "envoy.tracers.sample"; } diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index c53b8368e9ed..789cb5401925 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -2,8 +2,9 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", ) @@ -42,6 +43,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "cluster_update_tracker_test", + srcs = ["cluster_update_tracker_test.cc"], + deps = 
[ + "//source/common/upstream:cluster_update_tracker_lib", + "//test/mocks/upstream:upstream_mocks", + ], +) + envoy_cc_test( name = "conn_pool_map_impl_test", srcs = ["conn_pool_map_impl_test.cc"], @@ -71,6 +81,7 @@ envoy_cc_test( "//source/common/upstream:eds_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//source/server:transport_socket_config_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", @@ -329,7 +340,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "load_balancer_benchmark", srcs = ["load_balancer_benchmark.cc"], external_deps = [ @@ -347,6 +358,12 @@ envoy_cc_test_binary( ], ) +envoy_benchmark_test( + name = "load_balancer_benchmark_test", + timeout = "long", + benchmark_binary = "load_balancer_benchmark", +) + envoy_cc_test( name = "subset_lb_test", srcs = ["subset_lb_test.cc"], @@ -405,6 +422,7 @@ envoy_cc_test( "//source/common/upstream:strict_dns_cluster_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//source/server:transport_socket_config_lib", + "//test/common/stats:stat_test_utility_lib", "//test/mocks:common_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", @@ -456,6 +474,7 @@ envoy_cc_test_library( "//source/common/upstream:subset_lb_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tls:context_lib", + "//test/common/stats:stat_test_utility_lib", "//test/integration/clusters:custom_static_cluster", "//test/mocks/access_log:access_log_mocks", "//test/mocks/api:api_mocks", diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index 01e98b181e13..dbeac5c6c75b 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -1,5 +1,6 @@ #include 
#include +#include #include #include #include @@ -50,8 +51,8 @@ class TestStaticClusterFactory : public ClusterFactoryImplBase { class ClusterFactoryTestBase { protected: ClusterFactoryTestBase() : api_(Api::createApiForTest(stats_)) { - outlier_event_logger_.reset(new Outlier::MockEventLogger()); - dns_resolver_.reset(new Network::MockDnsResolver()); + outlier_event_logger_ = std::make_shared(); + dns_resolver_ = std::make_shared(); } NiceMock admin_; @@ -219,6 +220,35 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { "'envoy.clusters.bad_cluster_name'"); } +TEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + common_lb_config: + consistent_hashing_lb_config: + use_hostname_for_hashing: true + hosts: + - socket_address: + address: 10.0.0.1 + port_value: 443 + cluster_type: + name: envoy.clusters.test_static + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + { + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + ClusterFactoryImplBase::create( + cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, + random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + std::move(outlier_event_logger_), false, validation_visitor_, *api_); + }, + EnvoyException, + "Cannot use hostname for consistent hashing loadbalancing for cluster of type: " + "'envoy.clusters.test_static'"); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index a6db18af064f..fb5f335a0565 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -345,7 +345,7 @@ TEST_F(ClusterManagerImplTest, ValidClusterName) { ->second.get() .info() ->statsScope() - .counter("foo") + .counterFromString("foo") .inc(); EXPECT_EQ(1UL, 
factory_.stats_.counter("cluster.cluster_name.foo").value()); } diff --git a/test/common/upstream/cluster_update_tracker_test.cc b/test/common/upstream/cluster_update_tracker_test.cc new file mode 100644 index 000000000000..3d3dc9c56e5b --- /dev/null +++ b/test/common/upstream/cluster_update_tracker_test.cc @@ -0,0 +1,91 @@ +#include "common/upstream/cluster_update_tracker.h" + +#include "test/mocks/upstream/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Upstream { +namespace { + +class ClusterUpdateTrackerTest : public testing::Test { +public: + ClusterUpdateTrackerTest() { + expected_.cluster_.info_->name_ = cluster_name_; + irrelevant_.cluster_.info_->name_ = "unrelated_cluster"; + } + + NiceMock cm_; + NiceMock expected_; + NiceMock irrelevant_; + const std::string cluster_name_{"fake_cluster"}; +}; + +TEST_F(ClusterUpdateTrackerTest, ClusterDoesNotExistAtConstructionTime) { + EXPECT_CALL(cm_, get(cluster_name_)).WillOnce(Return(nullptr)); + + ClusterUpdateTracker cluster_tracker(cm_, cluster_name_); + + EXPECT_FALSE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), nullptr); +} + +TEST_F(ClusterUpdateTrackerTest, ClusterDoesExistAtConstructionTime) { + EXPECT_CALL(cm_, get(cluster_name_)).WillOnce(Return(&expected_)); + + ClusterUpdateTracker cluster_tracker(cm_, cluster_name_); + + EXPECT_TRUE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), expected_.cluster_.info_); +} + +TEST_F(ClusterUpdateTrackerTest, ShouldProperlyHandleUpdateCallbacks) { + EXPECT_CALL(cm_, get(cluster_name_)).WillOnce(Return(nullptr)); + + ClusterUpdateTracker cluster_tracker(cm_, cluster_name_); + + { + EXPECT_FALSE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), nullptr); + } + + { + // Simulate addition of an irrelevant cluster. 
+ cluster_tracker.onClusterAddOrUpdate(irrelevant_); + + EXPECT_FALSE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), nullptr); + } + + { + // Simulate addition of the relevant cluster. + cluster_tracker.onClusterAddOrUpdate(expected_); + + EXPECT_TRUE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), expected_.cluster_.info_); + } + + { + // Simulate removal of an irrelevant cluster. + cluster_tracker.onClusterRemoval(irrelevant_.cluster_.info_->name_); + + EXPECT_TRUE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), expected_.cluster_.info_); + } + + { + // Simulate removal of the relevant cluster. + cluster_tracker.onClusterRemoval(cluster_name_); + + EXPECT_FALSE(cluster_tracker.exists()); + EXPECT_EQ(cluster_tracker.info(), nullptr); + } +} + +} // namespace +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index a9aecdc9c741..23600bbfe0e5 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -13,6 +13,7 @@ #include "server/transport_socket_config_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/common/upstream/utility.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" @@ -94,8 +95,8 @@ class EdsTest : public testing::Test { Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, singleton_manager_, tls_, validation_visitor_, *api_); - cluster_.reset( - new EdsClusterImpl(eds_cluster_, runtime_, factory_context, std::move(scope), false)); + cluster_ = std::make_shared(eds_cluster_, runtime_, factory_context, + std::move(scope), false); EXPECT_EQ(initialize_phase, cluster_->initializePhase()); eds_callbacks_ = cm_.subscription_factory_.callbacks_; } @@ -113,7 +114,7 @@ class EdsTest : public testing::Test { } bool initialized_{}; - 
Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; Ssl::MockContextManager ssl_context_manager_; envoy::config::cluster::v3::Cluster eds_cluster_; NiceMock cm_; @@ -533,6 +534,37 @@ TEST_F(EdsTest, EndpointHealthStatus) { EXPECT_EQ(rebuild_container + 1, stats_.counter("cluster.name.update_no_rebuild").value()); } +// Validate that onConfigUpdate() updates the hostname. +TEST_F(EdsTest, Hostname) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + auto* endpoint = cluster_load_assignment.add_endpoints()->add_lb_endpoints()->mutable_endpoint(); + auto* socket_address = endpoint->mutable_address()->mutable_socket_address(); + socket_address->set_address("1.2.3.4"); + socket_address->set_port_value(1234); + endpoint->set_hostname("foo"); + cluster_load_assignment.set_cluster_name("fare"); + initialize(); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + EXPECT_EQ(hosts.size(), 1); + EXPECT_EQ(hosts[0]->hostname(), "foo"); +} + +TEST_F(EdsTest, UseHostnameForHealthChecks) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + auto* endpoint = cluster_load_assignment.add_endpoints()->add_lb_endpoints()->mutable_endpoint(); + auto* socket_address = endpoint->mutable_address()->mutable_socket_address(); + socket_address->set_address("1.2.3.4"); + socket_address->set_port_value(1234); + endpoint->mutable_health_check_config()->set_hostname("foo"); + cluster_load_assignment.set_cluster_name("fare"); + initialize(); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + EXPECT_EQ(hosts.size(), 1); + EXPECT_EQ(hosts[0]->hostnameForHealthChecks(), "foo"); +} + // Verify that a host is removed if it is removed from discovery, stabilized, and then later // fails active HC. 
TEST_F(EdsTest, EndpointRemovalAfterHcFail) { diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index a4823c3fafe4..c19007c76846 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -145,9 +145,9 @@ class HttpHealthCheckerImplTest : public testing::Test { codec_client_type: Http2 )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -169,9 +169,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -192,9 +192,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr 
host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -215,9 +215,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -238,9 +238,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -262,9 +262,9 @@ class HttpHealthCheckerImplTest : public testing::Test { always_log_health_check_failures: true )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -283,9 +283,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - 
health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -309,9 +309,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -331,9 +331,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -354,9 +354,9 @@ class HttpHealthCheckerImplTest : public testing::Test { )EOF", prefix); - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, 
parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -376,9 +376,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -398,9 +398,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -422,9 +422,9 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { 
onHostStatus(host, changed_state); @@ -446,9 +446,9 @@ class HttpHealthCheckerImplTest : public testing::Test { )EOF", host); - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -521,9 +521,9 @@ class HttpHealthCheckerImplTest : public testing::Test { value: "%START_TIME(%s.%9f)%" )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -546,9 +546,9 @@ class HttpHealthCheckerImplTest : public testing::Test { request_headers_to_remove: ["user-agent"] )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -1026,9 +1026,9 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { path: /healthcheck )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, 
parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -1096,9 +1096,9 @@ TEST_F(HttpHealthCheckerImplTest, TlsOptions) { EXPECT_CALL(*socket_factory, createTransportSocket(ApplicationProtocolListEq("http1"))); - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1249,11 +1249,96 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceRegexPatternCheck) { EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } +// This test verifies that when a hostname is set in the endpoint's HealthCheckConfig, it is used in +// the health check request. +TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValueOnTheHost) { + const std::string host = "www.envoyproxy.io"; + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config; + health_check_config.set_hostname(host); + auto test_host = std::make_shared( + cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, + envoy::config::core::v3::Locality(), health_check_config, 0, + envoy::config::core::v3::UNKNOWN); + const std::string path = "/healthcheck"; + setupServiceValidationHC(); + // Requires non-empty `service_name` in config. 
+ EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) + .WillOnce(Return(true)); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; + cluster_->info_->stats().upstream_cx_total_.inc(); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) + .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { + EXPECT_EQ(headers.Host()->value().getStringView(), host); + EXPECT_EQ(headers.Path()->value().getStringView(), path); + })); + health_checker_->start(); + + EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.max_interval", _)); + EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _)) + .WillOnce(Return(45000)); + EXPECT_CALL(*test_sessions_[0]->interval_timer_, + enableTimer(std::chrono::milliseconds(45000), _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + absl::optional health_checked_cluster("locations-production-iad"); + respond(0, "200", false, false, true, false, health_checked_cluster); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); +} + +// This test verifies that when a hostname is set in the endpoint's HealthCheckConfig and in the +// cluster level configuration, the one in the endpoint takes priority. 
+TEST_F(HttpHealthCheckerImplTest, + SuccessServiceCheckWithCustomHostValueOnTheHostThatOverridesConfigValue) { + const std::string host = "www.envoyproxy.io"; + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config; + health_check_config.set_hostname(host); + auto test_host = std::make_shared( + cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, + envoy::config::core::v3::Locality(), health_check_config, 0, + envoy::config::core::v3::UNKNOWN); + const std::string path = "/healthcheck"; + // Setup health check config with a different host, to check that we still get the host configured + // on the endpoint. + setupServiceValidationWithCustomHostValueHC("foo.com"); + // Requires non-empty `service_name` in config. + EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) + .WillOnce(Return(true)); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; + cluster_->info_->stats().upstream_cx_total_.inc(); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) + .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { + EXPECT_EQ(headers.Host()->value().getStringView(), host); + EXPECT_EQ(headers.Path()->value().getStringView(), path); + })); + health_checker_->start(); + + EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.max_interval", _)); + EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _)) + .WillOnce(Return(45000)); + EXPECT_CALL(*test_sessions_[0]->interval_timer_, + enableTimer(std::chrono::milliseconds(45000), _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + absl::optional health_checked_cluster("locations-production-iad"); + respond(0, "200", false, false, true, false, 
health_checked_cluster); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); +} + TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValue) { const std::string host = "www.envoyproxy.io"; const std::string path = "/healthcheck"; setupServiceValidationWithCustomHostValueHC(host); - // requires non-empty `service_name` in config. + // Requires non-empty `service_name` in config. EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -1310,7 +1395,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { const std::string value_downstream_local_address_without_port = "127.0.0.1"; setupServiceValidationWithAdditionalHeaders(); - // requires non-empty `service_name` in config. + // Requires non-empty `service_name` in config. EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -1371,7 +1456,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithoutUserAgent) { setupServiceValidationWithoutUserAgent(); - // requires non-empty `service_name` in config. + // Requires non-empty `service_name` in config. 
EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -2412,9 +2497,9 @@ class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { codec_client_type: Http2 )EOF"; - health_checker_.reset(new TestProdHttpHealthChecker(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -2435,9 +2520,9 @@ class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { path: /healthcheck )EOF"; - health_checker_.reset(new TestProdHttpHealthChecker(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -2470,9 +2555,9 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) { use_http2: false )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -2495,9 +2580,9 @@ TEST_F(HttpHealthCheckerImplTest, 
DEPRECATED_FEATURE_TEST(Http2CodecClient)) { use_http2: true )EOF"; - health_checker_.reset(new TestHttpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -2842,9 +2927,9 @@ class TcpHealthCheckerImplTest : public testing::Test { - text: "02" )EOF"; - health_checker_.reset( - new TcpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml.str()), dispatcher_, - runtime_, random_, HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml.str()), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); } void setupNoData() { @@ -2856,9 +2941,9 @@ class TcpHealthCheckerImplTest : public testing::Test { tcp_health_check: {} )EOF"; - health_checker_.reset(new TcpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); } void setupDataDontReuseConnection() { @@ -2875,9 +2960,9 @@ class TcpHealthCheckerImplTest : public testing::Test { - text: "02" )EOF"; - health_checker_.reset(new TcpHealthCheckerImpl(*cluster_, parseHealthCheckFromV2Yaml(yaml), - dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); } void expectSessionCreate() { @@ -3447,9 +3532,9 @@ class 
GrpcHealthCheckerImplTestBase { void setupHC() { const auto config = createGrpcHealthCheckConfig(); - health_checker_.reset(new TestGrpcHealthCheckerImpl(*cluster_, config, dispatcher_, runtime_, - random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, config, dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -3459,9 +3544,9 @@ class GrpcHealthCheckerImplTestBase { void setupHCWithUnhealthyThreshold(int value) { auto config = createGrpcHealthCheckConfig(); config.mutable_unhealthy_threshold()->set_value(value); - health_checker_.reset(new TestGrpcHealthCheckerImpl(*cluster_, config, dispatcher_, runtime_, - random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, config, dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -3474,9 +3559,9 @@ class GrpcHealthCheckerImplTestBase { if (authority.has_value()) { config.mutable_grpc_health_check()->set_authority(authority.value()); } - health_checker_.reset(new TestGrpcHealthCheckerImpl(*cluster_, config, dispatcher_, runtime_, - random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, config, dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -3486,9 +3571,9 @@ class GrpcHealthCheckerImplTestBase { void setupNoReuseConnectionHC() { auto config = createGrpcHealthCheckConfig(); config.mutable_reuse_connection()->set_value(false); - health_checker_.reset(new 
TestGrpcHealthCheckerImpl(*cluster_, config, dispatcher_, runtime_, - random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, config, dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -3505,9 +3590,9 @@ class GrpcHealthCheckerImplTestBase { config.mutable_interval_jitter()->set_seconds(0); config.mutable_unhealthy_threshold()->set_value(3); config.mutable_healthy_threshold()->set_value(3); - health_checker_.reset(new TestGrpcHealthCheckerImpl(*cluster_, config, dispatcher_, runtime_, - random_, - HealthCheckEventLoggerPtr(event_logger_))); + health_checker_ = std::make_shared( + *cluster_, config, dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); @@ -3667,6 +3752,11 @@ class GrpcHealthCheckerImplTestBase { cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + runHealthCheck(expected_host); + } + + void runHealthCheck(std::string expected_host) { + cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); @@ -3729,6 +3819,36 @@ class GrpcHealthCheckerImplTest : public testing::Test, public GrpcHealthChecker // Test single host check success. 
TEST_F(GrpcHealthCheckerImplTest, Success) { testSingleHostSuccess(absl::nullopt); } +TEST_F(GrpcHealthCheckerImplTest, SuccessWithHostname) { + std::string expected_host = "www.envoyproxy.io"; + + setupServiceNameHC(absl::nullopt); + + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config; + health_check_config.set_hostname(expected_host); + auto test_host = std::make_shared( + cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, + envoy::config::core::v3::Locality(), health_check_config, 0, + envoy::config::core::v3::UNKNOWN); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; + runHealthCheck(expected_host); +} + +TEST_F(GrpcHealthCheckerImplTest, SuccessWithHostnameOverridesConfig) { + std::string expected_host = "www.envoyproxy.io"; + + setupServiceNameHC("foo.com"); + + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig health_check_config; + health_check_config.set_hostname(expected_host); + auto test_host = std::make_shared( + cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, + envoy::config::core::v3::Locality(), health_check_config, 0, + envoy::config::core::v3::UNKNOWN); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; + runHealthCheck(expected_host); +} + // Test single host check success with custom authority. 
TEST_F(GrpcHealthCheckerImplTest, SuccessWithCustomAuthority) { const std::string authority = "www.envoyproxy.io"; diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index ef8cb518f144..f8e1177da0f9 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -40,6 +40,12 @@ class BaseTester { {}, hosts, {}, absl::nullopt); } + Envoy::Thread::MutexBasicLockable lock_; + // Reduce default log level to warn while running this benchmark to avoid problems due to + // excessive debug logging in upstream_impl.cc + Envoy::Logger::Context logging_context_{spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock_, false}; + PrioritySetImpl priority_set_; PrioritySetImpl local_priority_set_; Stats::IsolatedStoreImpl stats_store_; @@ -482,18 +488,3 @@ BENCHMARK(BM_MaglevLoadBalancerWeighted) } // namespace } // namespace Upstream } // namespace Envoy - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. -int main(int argc, char** argv) { - // TODO(mattklein123): Provide a common bazel benchmark wrapper much like we do for normal tests, - // fuzz, etc. 
- Envoy::Thread::MutexBasicLockable lock; - Envoy::Logger::Context logging_context(spdlog::level::warn, - Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); - - benchmark::Initialize(&argc, argv); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 406cecd31f4b..3ad095de9bc4 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -531,11 +531,11 @@ class RoundRobinLoadBalancerTest : public LoadBalancerTestBase { public: void init(bool need_local_cluster) { if (need_local_cluster) { - local_priority_set_.reset(new PrioritySetImpl()); + local_priority_set_ = std::make_shared(); local_priority_set_->getOrCreateHostSet(0); } - lb_.reset(new RoundRobinLoadBalancer(priority_set_, local_priority_set_.get(), stats_, runtime_, - random_, common_config_)); + lb_ = std::make_shared(priority_set_, local_priority_set_.get(), stats_, + runtime_, random_, common_config_); } // Updates priority 0 with the given hosts and hosts_per_locality. 
@@ -1562,8 +1562,8 @@ INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, LeastRequestLoadBalancerTest, class RandomLoadBalancerTest : public LoadBalancerTestBase { public: void init() { - lb_.reset( - new RandomLoadBalancer(priority_set_, nullptr, stats_, runtime_, random_, common_config_)); + lb_ = std::make_shared(priority_set_, nullptr, stats_, runtime_, random_, + common_config_); } std::shared_ptr lb_; }; diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index 6a04fe274764..74154fd82510 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -52,8 +52,8 @@ class LogicalDnsClusterTest : public testing::Test { Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, random_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); - cluster_.reset(new LogicalDnsCluster(cluster_config, runtime_, dns_resolver_, factory_context, - std::move(scope), false)); + cluster_ = std::make_shared(cluster_config, runtime_, dns_resolver_, + factory_context, std::move(scope), false); cluster_->prioritySet().addPriorityUpdateCb( [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated_.ready(); diff --git a/test/common/upstream/maglev_lb_test.cc b/test/common/upstream/maglev_lb_test.cc index adc2d5bbabfb..3fce26252ac4 100644 --- a/test/common/upstream/maglev_lb_test.cc +++ b/test/common/upstream/maglev_lb_test.cc @@ -13,12 +13,25 @@ namespace { class TestLoadBalancerContext : public LoadBalancerContextBase { public: - TestLoadBalancerContext(uint64_t hash_key) : hash_key_(hash_key) {} + using HostPredicate = std::function; + + TestLoadBalancerContext(uint64_t hash_key) + : TestLoadBalancerContext(hash_key, 0, [](const Host&) { return false; }) {} + TestLoadBalancerContext(uint64_t hash_key, uint32_t retry_count, + HostPredicate 
should_select_another_host) + : hash_key_(hash_key), retry_count_(retry_count), + should_select_another_host_(should_select_another_host) {} // Upstream::LoadBalancerContext absl::optional computeHashKey() override { return hash_key_; } + uint32_t hostSelectionRetryCount() const override { return retry_count_; }; + bool shouldSelectAnotherHost(const Host& host) override { + return should_select_another_host_(host); + } absl::optional hash_key_; + uint32_t retry_count_; + HostPredicate should_select_another_host_; }; // Note: ThreadAwareLoadBalancer base is heavily tested by RingHashLoadBalancerTest. Only basic @@ -81,6 +94,90 @@ TEST_F(MaglevLoadBalancerTest, Basic) { } } +// Basic with hostname. +TEST_F(MaglevLoadBalancerTest, BasicWithHostName) { + host_set_.hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90"), + makeTestHost(info_, "91", "tcp://127.0.0.1:91"), + makeTestHost(info_, "92", "tcp://127.0.0.1:92"), + makeTestHost(info_, "93", "tcp://127.0.0.1:93"), + makeTestHost(info_, "94", "tcp://127.0.0.1:94"), + makeTestHost(info_, "95", "tcp://127.0.0.1:95")}; + host_set_.healthy_hosts_ = host_set_.hosts_; + host_set_.runCallbacks({}, {}); + common_config_ = envoy::config::cluster::v3::Cluster::CommonLbConfig(); + auto chc = envoy::config::cluster::v3::Cluster::CommonLbConfig::ConsistentHashingLbConfig(); + chc.set_use_hostname_for_hashing(true); + common_config_.set_allocated_consistent_hashing_lb_config(&chc); + init(7); + common_config_.release_consistent_hashing_lb_config(); + + EXPECT_EQ("maglev_lb.min_entries_per_host", lb_->stats().min_entries_per_host_.name()); + EXPECT_EQ("maglev_lb.max_entries_per_host", lb_->stats().max_entries_per_host_.name()); + EXPECT_EQ(1, lb_->stats().min_entries_per_host_.value()); + EXPECT_EQ(2, lb_->stats().max_entries_per_host_.value()); + + // maglev: i=0 host=92 + // maglev: i=1 host=95 + // maglev: i=2 host=90 + // maglev: i=3 host=93 + // maglev: i=4 host=94 + // maglev: i=5 host=91 + // maglev: i=6 host=90 + 
LoadBalancerPtr lb = lb_->factory()->create(); + const std::vector expected_assignments{2, 5, 0, 3, 4, 1, 0}; + for (uint32_t i = 0; i < 3 * expected_assignments.size(); ++i) { + TestLoadBalancerContext context(i); + EXPECT_EQ(host_set_.hosts_[expected_assignments[i % expected_assignments.size()]], + lb->chooseHost(&context)); + } +} + +// Same ring as the Basic test, but exercise retry host predicate behavior. +TEST_F(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) { + host_set_.hosts_ = { + makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), + makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93"), + makeTestHost(info_, "tcp://127.0.0.1:94"), makeTestHost(info_, "tcp://127.0.0.1:95")}; + host_set_.healthy_hosts_ = host_set_.hosts_; + host_set_.runCallbacks({}, {}); + init(7); + + EXPECT_EQ("maglev_lb.min_entries_per_host", lb_->stats().min_entries_per_host_.name()); + EXPECT_EQ("maglev_lb.max_entries_per_host", lb_->stats().max_entries_per_host_.name()); + EXPECT_EQ(1, lb_->stats().min_entries_per_host_.value()); + EXPECT_EQ(2, lb_->stats().max_entries_per_host_.value()); + + // maglev: i=0 host=127.0.0.1:92 + // maglev: i=1 host=127.0.0.1:94 + // maglev: i=2 host=127.0.0.1:90 + // maglev: i=3 host=127.0.0.1:91 + // maglev: i=4 host=127.0.0.1:95 + // maglev: i=5 host=127.0.0.1:90 + // maglev: i=6 host=127.0.0.1:93 + LoadBalancerPtr lb = lb_->factory()->create(); + { + // Confirm that i=3 is selected by the hash. + TestLoadBalancerContext context(10); + EXPECT_EQ(host_set_.hosts_[1], lb->chooseHost(&context)); + } + { + // First attempt succeeds even when retry count is > 0. + TestLoadBalancerContext context(10, 2, [](const Host&) { return false; }); + EXPECT_EQ(host_set_.hosts_[1], lb->chooseHost(&context)); + } + { + // Second attempt chooses a different host in the ring. 
+ TestLoadBalancerContext context( + 10, 2, [&](const Host& host) { return &host == host_set_.hosts_[1].get(); }); + EXPECT_EQ(host_set_.hosts_[0], lb->chooseHost(&context)); + } + { + // Exhausted retries return the last checked host. + TestLoadBalancerContext context(10, 2, [](const Host&) { return true; }); + EXPECT_EQ(host_set_.hosts_[5], lb->chooseHost(&context)); + } +} + // Weighted sanity test. TEST_F(MaglevLoadBalancerTest, Weighted) { host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 608f4c31992c..72fb82425b02 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -79,8 +79,8 @@ class OriginalDstClusterTest : public testing::Test { Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( admin_, ssl_context_manager_, *scope, cm, local_info_, dispatcher_, random_, stats_store_, singleton_manager_, tls_, validation_visitor_, *api_); - cluster_.reset( - new OriginalDstCluster(cluster_config, runtime_, factory_context, std::move(scope), false)); + cluster_ = std::make_shared(cluster_config, runtime_, factory_context, + std::move(scope), false); cluster_->prioritySet().addPriorityUpdateCb( [&](uint32_t, const HostVector&, const HostVector&) -> void { membership_updated_.ready(); diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index 6eebcc0c791d..fbd4906e01fe 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -27,12 +27,25 @@ namespace { class TestLoadBalancerContext : public LoadBalancerContextBase { public: - TestLoadBalancerContext(uint64_t hash_key) : hash_key_(hash_key) {} + using HostPredicate = std::function; + + TestLoadBalancerContext(uint64_t hash_key) + : TestLoadBalancerContext(hash_key, 0, [](const Host&) { return false; 
}) {} + TestLoadBalancerContext(uint64_t hash_key, uint32_t retry_count, + HostPredicate should_select_another_host) + : hash_key_(hash_key), retry_count_(retry_count), + should_select_another_host_(should_select_another_host) {} // Upstream::LoadBalancerContext absl::optional computeHashKey() override { return hash_key_; } + uint32_t hostSelectionRetryCount() const override { return retry_count_; }; + bool shouldSelectAnotherHost(const Host& host) override { + return should_select_another_host_(host); + } absl::optional hash_key_; + uint32_t retry_count_; + HostPredicate should_select_another_host_; }; class RingHashLoadBalancerTest : public testing::TestWithParam { @@ -247,6 +260,146 @@ TEST_P(RingHashLoadBalancerTest, BasicWithMurmur2) { EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value()); } +// Expect reasonable results with hostname. +TEST_P(RingHashLoadBalancerTest, BasicWithHostname) { + hostSet().hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90"), + makeTestHost(info_, "91", "tcp://127.0.0.1:91"), + makeTestHost(info_, "92", "tcp://127.0.0.1:92"), + makeTestHost(info_, "93", "tcp://127.0.0.1:93"), + makeTestHost(info_, "94", "tcp://127.0.0.1:94"), + makeTestHost(info_, "95", "tcp://127.0.0.1:95")}; + hostSet().healthy_hosts_ = hostSet().hosts_; + hostSet().runCallbacks({}, {}); + + config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig(); + config_.value().mutable_minimum_ring_size()->set_value(12); + + common_config_ = envoy::config::cluster::v3::Cluster::CommonLbConfig(); + auto chc = envoy::config::cluster::v3::Cluster::CommonLbConfig::ConsistentHashingLbConfig(); + chc.set_use_hostname_for_hashing(true); + common_config_.set_allocated_consistent_hashing_lb_config(&chc); + + init(); + common_config_.release_consistent_hashing_lb_config(); + + EXPECT_EQ("ring_hash_lb.size", lb_->stats().size_.name()); + EXPECT_EQ("ring_hash_lb.min_hashes_per_host", lb_->stats().min_hashes_per_host_.name()); + EXPECT_EQ("ring_hash_lb.max_hashes_per_host", 
lb_->stats().max_hashes_per_host_.name()); + EXPECT_EQ(12, lb_->stats().size_.value()); + EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value()); + EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value()); + + // hash ring: + // host | position + // --------------------------- + // 95 | 1975508444536362413 + // 95 | 2376063919839173711 + // 93 | 2386806903309390596 + // 94 | 6749904478991551885 + // 93 | 6803900775736438537 + // 92 | 7225015537174310577 + // 90 | 8787465352164086522 + // 92 | 11282020843382717940 + // 91 | 13723418369486627818 + // 90 | 13776502110861797421 + // 91 | 14338313586354474791 + // 94 | 15364271037087512980 + + LoadBalancerPtr lb = lb_->factory()->create(); + { + TestLoadBalancerContext context(0); + EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context)); + } + { + TestLoadBalancerContext context(std::numeric_limits::max()); + EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context)); + } + { + TestLoadBalancerContext context(7225015537174310577); + EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(&context)); + } + { + TestLoadBalancerContext context(6803900775736438537); + EXPECT_EQ(hostSet().hosts_[3], lb->chooseHost(&context)); + } + { EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(nullptr)); } + EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value()); + + hostSet().healthy_hosts_.clear(); + hostSet().runCallbacks({}, {}); + lb = lb_->factory()->create(); + { + TestLoadBalancerContext context(0); + EXPECT_EQ(hostSet().hosts_[5], lb->chooseHost(&context)); + } + EXPECT_EQ(1UL, stats_.lb_healthy_panic_.value()); +} + +// Test the same ring as Basic but exercise retry host predicate behavior. 
+TEST_P(RingHashLoadBalancerTest, BasicWithRetryHostPredicate) { + hostSet().hosts_ = { + makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), + makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93"), + makeTestHost(info_, "tcp://127.0.0.1:94"), makeTestHost(info_, "tcp://127.0.0.1:95")}; + hostSet().healthy_hosts_ = hostSet().hosts_; + hostSet().runCallbacks({}, {}); + + config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig(); + config_.value().mutable_minimum_ring_size()->set_value(12); + + init(); + EXPECT_EQ("ring_hash_lb.size", lb_->stats().size_.name()); + EXPECT_EQ("ring_hash_lb.min_hashes_per_host", lb_->stats().min_hashes_per_host_.name()); + EXPECT_EQ("ring_hash_lb.max_hashes_per_host", lb_->stats().max_hashes_per_host_.name()); + EXPECT_EQ(12, lb_->stats().size_.value()); + EXPECT_EQ(2, lb_->stats().min_hashes_per_host_.value()); + EXPECT_EQ(2, lb_->stats().max_hashes_per_host_.value()); + + // hash ring: + // port | position + // --------------------------- + // :94 | 833437586790550860 + // :92 | 928266305478181108 + // :90 | 1033482794131418490 + // :95 | 3551244743356806947 + // :93 | 3851675632748031481 + // :91 | 5583722120771150861 + // :91 | 6311230543546372928 + // :93 | 7700377290971790572 + // :95 | 13144177310400110813 + // :92 | 13444792449719432967 + // :94 | 15516499411664133160 + // :90 | 16117243373044804889 + + LoadBalancerPtr lb = lb_->factory()->create(); + { + // Proof that we know which host will be selected. + TestLoadBalancerContext context(0); + EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context)); + } + { + // First attempt succeeds even when retry count is > 0. + TestLoadBalancerContext context(0, 2, [](const Host&) { return false; }); + EXPECT_EQ(hostSet().hosts_[4], lb->chooseHost(&context)); + } + { + // Second attempt chooses the next host in the ring. 
+ TestLoadBalancerContext context( + 0, 2, [&](const Host& host) { return &host == hostSet().hosts_[4].get(); }); + EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(&context)); + } + { + // Exhausted retries return the last checked host. + TestLoadBalancerContext context(0, 2, [](const Host&) { return true; }); + EXPECT_EQ(hostSet().hosts_[0], lb->chooseHost(&context)); + } + { + // Retries wrap around the ring. + TestLoadBalancerContext context(0, 13, [](const Host&) { return true; }); + EXPECT_EQ(hostSet().hosts_[2], lb->chooseHost(&context)); + } +} + // Given 2 hosts and a minimum ring size of 3, expect 2 hashes per host and a ring size of 4. TEST_P(RingHashLoadBalancerTest, UnevenHosts) { hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 39df196afb9a..5c4cf7c5faf5 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -187,9 +187,9 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { configureHostSet(failover_host_metadata, *priority_set_.getMockHostSet(1)); } - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, - runtime_, random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared( + lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, + ring_hash_lb_config_, least_request_lb_config_, common_config_); } void zoneAwareInit(const std::vector& host_metadata_per_locality, @@ -214,7 +214,7 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; - local_hosts_.reset(new HostVector()); + local_hosts_ = std::make_shared(); std::vector local_hosts_per_locality_vector; for (const auto& local_host_metadata : local_host_metadata_per_locality) { HostVector 
local_locality_hosts; @@ -236,9 +236,9 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { std::make_shared(), HostsPerLocalityImpl::empty()), {}, {}, {}, absl::nullopt); - lb_.reset(new SubsetLoadBalancer( + lb_ = std::make_shared( lb_type_, priority_set_, &local_priority_set_, stats_, stats_store_, runtime_, random_, - subset_info_, ring_hash_lb_config_, least_request_lb_config_, common_config_)); + subset_info_, ring_hash_lb_config_, least_request_lb_config_, common_config_); } HostSharedPtr makeHost(const std::string& url, const HostMetadata& metadata) { @@ -1291,9 +1291,9 @@ TEST_F(SubsetLoadBalancerTest, IgnoresHostsWithoutMetadata) { host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_); TestLoadBalancerContext context_version({{"version", "1.0"}}); @@ -1710,9 +1710,9 @@ TEST_F(SubsetLoadBalancerTest, DisabledLocalityWeightAwareness) { }, host_set_, {1, 100}); - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_); TestLoadBalancerContext context({{"version", "1.1"}}); @@ -1733,9 +1733,9 @@ TEST_F(SubsetLoadBalancerTest, DoesNotCheckHostHealth) { EXPECT_CALL(*mock_host, weight()).WillRepeatedly(Return(1)); - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, 
runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_); } TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { @@ -1756,9 +1756,9 @@ TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { }, host_set_, {1, 100}); - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_); TestLoadBalancerContext context({{"version", "1.1"}}); @@ -1791,9 +1791,9 @@ TEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeights) { }, host_set_, {50, 50}); - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_); TestLoadBalancerContext context({{"version", "1.1"}}); // Since we scale the locality weights by number of hosts removed, we expect to see the second @@ -1836,9 +1836,9 @@ TEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeightsRounding) { }, host_set_, {2, 2}); - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + 
least_request_lb_config_, common_config_); TestLoadBalancerContext context({{"version", "1.0"}}); // We expect to see a 33/66 split because 2 * 1 / 2 = 1 and 2 * 3 / 4 = 1.5 -> 2 @@ -1868,9 +1868,9 @@ TEST_F(SubsetLoadBalancerTest, ScaleLocalityWeightsWithNoLocalityWeights) { }, host_set_); - lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, - random_, subset_info_, ring_hash_lb_config_, - least_request_lb_config_, common_config_)); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + least_request_lb_config_, common_config_); } TEST_P(SubsetLoadBalancerTest, GaugesUpdatedOnDestroy) { diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index 956ca894dd99..a71d276471fa 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -24,6 +24,7 @@ #include "extensions/transport_sockets/tls/context_manager_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/common/upstream/utility.h" #include "test/integration/clusters/custom_static_cluster.h" #include "test/mocks/access_log/mocks.h" @@ -120,7 +121,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api)); MOCK_METHOD(CdsApi*, createCds_, ()); - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; NiceMock tls_; std::shared_ptr> dns_resolver_{ new NiceMock}; diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 1f03cfc21bc4..a041dbdd72c1 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -22,6 +22,7 @@ #include "server/transport_socket_config_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/common/upstream/utility.h" #include 
"test/mocks/common.h" #include "test/mocks/local_info/mocks.h" @@ -58,7 +59,7 @@ class UpstreamImplTestBase { NiceMock dispatcher_; NiceMock runtime_; NiceMock random_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; NiceMock validation_visitor_; @@ -1188,6 +1189,16 @@ TEST(HostImplTest, HealthPipeAddress) { EnvoyException, "Invalid host configuration: non-zero port for non-IP address"); } +// Test that hostname flag from the health check config propagates. +TEST(HostImplTest, HealthcheckHostname) { + std::shared_ptr info{new NiceMock()}; + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config; + config.set_hostname("foo"); + HostDescriptionImpl descr(info, "", Network::Utility::resolveUrl("tcp://1.2.3.4:80"), nullptr, + envoy::config::core::v3::Locality().default_instance(), config, 1); + EXPECT_EQ("foo", descr.hostnameForHealthChecks()); +} + class StaticClusterImplTest : public testing::Test, public UpstreamImplTestBase {}; TEST_F(StaticClusterImplTest, InitialHosts) { @@ -1253,6 +1264,77 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEmptyHostname) { EXPECT_FALSE(cluster.info()->addedViaApi()); } +TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostname) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + hostname: foo + address: + socket_address: + address: 10.0.0.1 + port_value: 443 + health_check_config: + port_value: 8000 + )EOF"; + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ("foo", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname()); + EXPECT_FALSE(cluster.info()->addedViaApi()); +} + +TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostnameWithHealthChecks) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + hostname: foo + address: + socket_address: + address: 10.0.0.1 + port_value: 443 + health_check_config: + port_value: 8000 + hostname: "foo2" + )EOF"; + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ("foo", cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostname()); + EXPECT_EQ("foo2", + cluster.prioritySet().hostSetsPerPriority()[0]->hosts()[0]->hostnameForHealthChecks()); + EXPECT_FALSE(cluster.info()->addedViaApi()); +} + TEST_F(StaticClusterImplTest, LoadAssignmentMultiplePriorities) { const std::string yaml = R"EOF( name: staticcluster @@ -1952,7 +2034,7 @@ class ClusterInfoImplTest : public testing::Test { *factory_context_, std::move(scope_), false); } - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; Ssl::MockContextManager ssl_context_manager_; std::shared_ptr dns_resolver_{new NiceMock()}; NiceMock dispatcher_; diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index c1ef6c16bdd9..a181985b1c07 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -66,6 +66,15 @@ inline envoy::config::cluster::v3::Cluster defaultStaticCluster(const std::strin return parseClusterFromV2Json(defaultStaticClusterJson(name)); } +inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& hostname, + const std::string& url, uint32_t weight = 1) { + return HostSharedPtr{ + new HostImpl(cluster, hostname, Network::Utility::resolveUrl(url), nullptr, weight, + envoy::config::core::v3::Locality(), + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, + envoy::config::core::v3::UNKNOWN)}; +} + inline HostSharedPtr 
makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, uint32_t weight = 1) { return HostSharedPtr{ diff --git a/test/config/utility.cc b/test/config/utility.cc index be94659d43ee..8ad579e62c1c 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -30,7 +30,8 @@ namespace Envoy { -const std::string ConfigHelper::BASE_CONFIG = R"EOF( +std::string ConfigHelper::baseConfig() { + return R"EOF( admin: access_log_path: /dev/null address: @@ -68,8 +69,10 @@ const std::string ConfigHelper::BASE_CONFIG = R"EOF( address: 127.0.0.1 port_value: 0 )EOF"; +} -const std::string ConfigHelper::BASE_UDP_LISTENER_CONFIG = R"EOF( +std::string ConfigHelper::baseUdpListenerConfig() { + return R"EOF( admin: access_log_path: /dev/null address: @@ -96,8 +99,10 @@ const std::string ConfigHelper::BASE_UDP_LISTENER_CONFIG = R"EOF( port_value: 0 protocol: udp )EOF"; +} -const std::string ConfigHelper::TCP_PROXY_CONFIG = BASE_CONFIG + R"EOF( +std::string ConfigHelper::tcpProxyConfig() { + return absl::StrCat(baseConfig(), R"EOF( filter_chains: filters: name: tcp @@ -105,9 +110,18 @@ const std::string ConfigHelper::TCP_PROXY_CONFIG = BASE_CONFIG + R"EOF( "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy stat_prefix: tcp_stats cluster: cluster_0 +)EOF"); +} + +std::string ConfigHelper::tlsInspectorFilter() { + return R"EOF( +name: "envoy.filters.listener.tls_inspector" +typed_config: )EOF"; +} -const std::string ConfigHelper::HTTP_PROXY_CONFIG = BASE_CONFIG + R"EOF( +std::string ConfigHelper::httpProxyConfig() { + return absl::StrCat(baseConfig(), R"EOF( filter_chains: filters: name: http @@ -134,12 +148,14 @@ const std::string ConfigHelper::HTTP_PROXY_CONFIG = BASE_CONFIG + R"EOF( prefix: "/" domains: "*" name: route_config_0 -)EOF"; +)EOF"); +} // TODO(danzh): For better compatibility with HTTP integration test framework, // it's better to combine with HTTP_PROXY_CONFIG, and use config modifiers to // specify quic specific things. 
-const std::string ConfigHelper::QUIC_HTTP_PROXY_CONFIG = BASE_UDP_LISTENER_CONFIG + R"EOF( +std::string ConfigHelper::quicHttpProxyConfig() { + return absl::StrCat(baseUdpListenerConfig(), R"EOF( filter_chains: transport_socket: name: envoy.transport_sockets.quic @@ -170,34 +186,38 @@ const std::string ConfigHelper::QUIC_HTTP_PROXY_CONFIG = BASE_UDP_LISTENER_CONFI name: route_config_0 udp_listener_config: udp_listener_name: "quiche_quic_listener" -)EOF"; +)EOF"); +} -const std::string ConfigHelper::DEFAULT_BUFFER_FILTER = - R"EOF( +std::string ConfigHelper::defaultBufferFilter() { + return R"EOF( name: buffer typed_config: "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes : 5242880 )EOF"; +} -const std::string ConfigHelper::SMALL_BUFFER_FILTER = - R"EOF( +std::string ConfigHelper::smallBufferFilter() { + return R"EOF( name: buffer typed_config: "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes : 1024 )EOF"; +} -const std::string ConfigHelper::DEFAULT_HEALTH_CHECK_FILTER = - R"EOF( +std::string ConfigHelper::defaultHealthCheckFilter() { + return R"EOF( name: health_check typed_config: "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: false )EOF"; +} -const std::string ConfigHelper::DEFAULT_SQUASH_FILTER = - R"EOF( +std::string ConfigHelper::defaultSquashFilter() { + return R"EOF( name: squash typed_config: "@type": type.googleapis.com/envoy.config.filter.http.squash.v2.Squash @@ -217,6 +237,7 @@ name: squash seconds: 1 nanos: 0 )EOF"; +} // TODO(fredlas) set_node_on_first_message_only was true; the delta+SotW unification // work restores it here. 
@@ -391,6 +412,27 @@ ConfigHelper::ConfigHelper(const Network::Address::IpVersion version, Api::Api& } } +void ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml, + absl::string_view cluster_name) { + RELEASE_ASSERT(!finalized_, ""); + ProtobufWkt::Struct cluster_metadata; + TestUtility::loadFromYaml(std::string(metadata_yaml), cluster_metadata); + + auto* static_resources = bootstrap_.mutable_static_resources(); + for (int i = 0; i < static_resources->clusters_size(); ++i) { + auto* cluster = static_resources->mutable_clusters(i); + if (cluster->name() != cluster_name) { + continue; + } + for (const auto& kvp : cluster_metadata.fields()) { + ASSERT_TRUE(kvp.second.kind_case() == ProtobufWkt::Value::KindCase::kStructValue); + cluster->mutable_metadata()->mutable_filter_metadata()->insert( + {kvp.first, kvp.second.struct_value()}); + } + break; + } +} + void ConfigHelper::applyConfigModifiers() { for (const auto& config_modifier : config_modifiers_) { config_modifier(bootstrap_); @@ -712,6 +754,24 @@ bool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view f return true; } +bool ConfigHelper::setListenerAccessLog(const std::string& filename, absl::string_view format) { + RELEASE_ASSERT(!finalized_, ""); + if (bootstrap_.mutable_static_resources()->listeners_size() == 0) { + return false; + } + envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; + if (!format.empty()) { + access_log_config.set_format(std::string(format)); + } + access_log_config.set_path(filename); + bootstrap_.mutable_static_resources() + ->mutable_listeners(0) + ->add_access_log() + ->mutable_typed_config() + ->PackFrom(access_log_config); + return true; +} + void ConfigHelper::initializeTls( const ServerSslOptions& options, envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_tls_context) { @@ -783,6 +843,18 @@ void ConfigHelper::addNetworkFilter(const std::string& filter_yaml) { } } +void 
ConfigHelper::addListenerFilter(const std::string& filter_yaml) { + RELEASE_ASSERT(!finalized_, ""); + auto* listener = bootstrap_.mutable_static_resources()->mutable_listeners(0); + auto* filter_list_back = listener->add_listener_filters(); + TestUtility::loadFromYaml(filter_yaml, *filter_list_back); + + // Now move it to the front. + for (int i = listener->listener_filters_size() - 1; i > 0; --i) { + listener->mutable_listener_filters()->SwapElements(i, i - 1); + } +} + bool ConfigHelper::loadHttpConnectionManager( envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { RELEASE_ASSERT(!finalized_, ""); diff --git a/test/config/utility.h b/test/config/utility.h index 785f3a050fed..77f0553a7bbf 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -60,7 +60,7 @@ class ConfigHelper { // By default, this runs with an L7 proxy config, but config can be set to TCP_PROXY_CONFIG // to test L4 proxying. ConfigHelper(const Network::Address::IpVersion version, Api::Api& api, - const std::string& config = HTTP_PROXY_CONFIG); + const std::string& config = httpProxyConfig()); static void initializeTls(const ServerSslOptions& options, @@ -71,25 +71,28 @@ class ConfigHelper { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&)>; // A basic configuration (admin port, cluster_0, one listener) with no network filters. - static const std::string BASE_CONFIG; + static std::string baseConfig(); // A basic configuration (admin port, cluster_0, one udp listener) with no network filters. - static const std::string BASE_UDP_LISTENER_CONFIG; + static std::string baseUdpListenerConfig(); + + // A string for a tls inspector listener filter which can be used with addListenerFilter() + static std::string tlsInspectorFilter(); // A basic configuration for L4 proxying. - static const std::string TCP_PROXY_CONFIG; + static std::string tcpProxyConfig(); // A basic configuration for L7 proxying. 
- static const std::string HTTP_PROXY_CONFIG; + static std::string httpProxyConfig(); // A basic configuration for L7 proxying with QUIC transport. - static const std::string QUIC_HTTP_PROXY_CONFIG; + static std::string quicHttpProxyConfig(); // A string for a basic buffer filter, which can be used with addFilter() - static const std::string DEFAULT_BUFFER_FILTER; + static std::string defaultBufferFilter(); // A string for a small buffer filter, which can be used with addFilter() - static const std::string SMALL_BUFFER_FILTER; - // a string for a health check filter which can be used with addFilter() - static const std::string DEFAULT_HEALTH_CHECK_FILTER; - // a string for a squash filter which can be used with addFilter() - static const std::string DEFAULT_SQUASH_FILTER; + static std::string smallBufferFilter(); + // A string for a health check filter which can be used with addFilter() + static std::string defaultHealthCheckFilter(); + // A string for a squash filter which can be used with addFilter() + static std::string defaultSquashFilter(); // Configuration for L7 proxying, with clusters cluster_1 and cluster_2 meant to be added via CDS. // api_type should be REST, GRPC, or DELTA_GRPC. @@ -140,6 +143,9 @@ class ConfigHelper { // Add a network filter prior to existing filters. void addNetworkFilter(const std::string& filter_yaml); + // Add a listener filter prior to existing filters. + void addListenerFilter(const std::string& filter_yaml); + // Sets the client codec to the specified type. void setClientCodec(envoy::extensions::filters::network::http_connection_manager::v3:: HttpConnectionManager::CodecType type); @@ -152,6 +158,9 @@ class ConfigHelper { // /dev/null. bool setAccessLog(const std::string& filename, absl::string_view format = ""); + // Set the listener access log for the first listener to a given file. + bool setListenerAccessLog(const std::string& filename, absl::string_view format = ""); + // Renames the first listener to the name specified. 
void renameListener(const std::string& name); @@ -183,6 +192,10 @@ class ConfigHelper { // Add this key value pair to the static runtime. void addRuntimeOverride(const std::string& key, const std::string& value); + // Add filter_metadata to a cluster with the given name + void addClusterFilterMetadata(absl::string_view metadata_yaml, + absl::string_view cluster_name = "cluster_0"); + private: // Load the first HCM struct from the first listener into a parsed proto. bool loadHttpConnectionManager( diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh index 02b98322ae45..c1a2352a5ee2 100755 --- a/test/coverage/gen_build.sh +++ b/test/coverage/gen_build.sh @@ -31,13 +31,16 @@ else COVERAGE_TARGETS=//test/... fi -for target in ${COVERAGE_TARGETS}; do - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" -done +# This setting allows consuming projects to only run coverage over private extensions. +if [[ -z "${ONLY_EXTRA_QUERY_PATHS}" ]]; then + for target in ${COVERAGE_TARGETS}; do + TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" + done -# Run the QUICHE platform api tests for coverage. -if [[ "${COVERAGE_TARGETS}" == "//test/..." ]]; then - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', '@com_googlesource_quiche//:all')" | grep "^@com_googlesource_quiche")" + # Run the QUICHE platform api tests for coverage. + if [[ "${COVERAGE_TARGETS}" == "//test/..." 
]]; then + TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', '@com_googlesource_quiche//:all')" | grep "^@com_googlesource_quiche")" + fi fi if [ -n "${EXTRA_QUERY_PATHS}" ]; then diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index b34207ccd5b5..ac625f349a07 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -392,7 +392,7 @@ TEST_P(MainCommonTest, ConstructDestructLogger) { VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv())); const std::string logger_name = "logger"; - spdlog::details::log_msg log_msg(&logger_name, spdlog::level::level_enum::err, "error"); + spdlog::details::log_msg log_msg(logger_name, spdlog::level::level_enum::err, "error"); Logger::Registry::getSink()->log(log_msg); } diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index ac788113cff0..a83f27484641 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -28,7 +28,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT public BaseIntegrationTest { public: TcpGrpcAccessLogIntegrationTest() - : BaseIntegrationTest(ipVersion(), ConfigHelper::TCP_PROXY_CONFIG) { + : BaseIntegrationTest(ipVersion(), ConfigHelper::tcpProxyConfig()) { enable_half_close_ = true; } @@ -54,12 +54,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - auto* filter_chain = listener->mutable_filter_chains(0); - auto* config_blob = filter_chain->mutable_filters(0)->mutable_typed_config(); - auto tcp_proxy_config = - MessageUtil::anyConvert( - 
*config_blob); - auto* access_log = tcp_proxy_config.add_access_log(); + auto* access_log = listener->add_access_log(); access_log->set_name("grpc_accesslog"); envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig access_log_config; auto* common_config = access_log_config.mutable_common_config(); @@ -67,7 +62,6 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); access_log->mutable_typed_config()->PackFrom(access_log_config); - config_blob->PackFrom(tcp_proxy_config); }); BaseIntegrationTest::initialize(); } diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index e01bc47bb62e..27fabee1128c 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -101,12 +101,12 @@ class RedisClusterTest : public testing::Test, ProtobufWkt::Struct::default_instance(), ProtobufMessage::getStrictValidationVisitor(), config); cluster_callback_ = std::make_shared>(); - cluster_.reset(new RedisCluster( + cluster_ = std::make_shared( cluster_config, TestUtility::downcastAndValidate( config), *this, cm, runtime_, *api_, dns_resolver_, factory_context, std::move(scope), false, - cluster_callback_)); + cluster_callback_); // This allows us to create expectation on cluster slot response without waiting for // makeRequest. 
pool_callbacks_ = &cluster_->redis_discovery_session_; diff --git a/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc b/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc index 345b291a964d..499dec138ce9 100644 --- a/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc +++ b/test/extensions/common/aws/aws_metadata_fetcher_integration_test.cc @@ -17,7 +17,8 @@ class AwsMetadataIntegrationTestBase : public ::testing::Test, public BaseIntegr : BaseIntegrationTest(Network::Address::IpVersion::v4, renderConfig(status_code, delay_s)) {} static std::string renderConfig(int status_code, int delay_s) { - return fmt::format(ConfigHelper::BASE_CONFIG + R"EOF( + return absl::StrCat(ConfigHelper::baseConfig(), + fmt::format(R"EOF( filter_chains: filters: name: http @@ -66,7 +67,7 @@ class AwsMetadataIntegrationTestBase : public ::testing::Test, public BaseIntegr domains: "*" name: route_config_0 )EOF", - delay_s, delay_s > 0 ? 0 : 1000, status_code, status_code); + delay_s, delay_s > 0 ? 
0 : 1000, status_code, status_code)); } void SetUp() override { BaseIntegrationTest::initialize(); } diff --git a/test/extensions/common/proxy_protocol/BUILD b/test/extensions/common/proxy_protocol/BUILD new file mode 100644 index 000000000000..bd269493ddf5 --- /dev/null +++ b/test/extensions/common/proxy_protocol/BUILD @@ -0,0 +1,40 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +envoy_package() + +envoy_cc_test( + name = "proxy_protocol_header_test", + srcs = ["proxy_protocol_header_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "proxy_protocol_regression_test", + srcs = ["proxy_protocol_regression_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/event:dispatcher_includes", + "//source/common/event:dispatcher_lib", + "//source/common/network:connection_balancer_lib", + "//source/common/network:listener_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//source/extensions/filters/listener/proxy_protocol:proxy_protocol_lib", + "//source/server:connection_handler_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc new file mode 100644 index 000000000000..052544a4a99a --- /dev/null +++ b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc @@ -0,0 +1,99 @@ +#include "envoy/network/address.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +#include 
"test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace ProxyProtocol { +namespace { + +TEST(ProxyProtocolHeaderTest, GeneratesV1IPv4Header) { + const auto expectedHeaderStr = "PROXY TCP4 174.2.2.222 172.0.0.1 50000 80\r\n"; + const Buffer::OwnedImpl expectedBuff(expectedHeaderStr); + const auto src_addr = "174.2.2.222"; + const auto dst_addr = "172.0.0.1"; + const auto src_port = 50000; + const auto dst_port = 80; + const auto version = Network::Address::IpVersion::v4; + Buffer::OwnedImpl buff{}; + + generateV1Header(src_addr, dst_addr, src_port, dst_port, version, buff); + + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); +} + +TEST(ProxyProtocolHeaderTest, GeneratesV1IPv6Header) { + const auto expectedHeaderStr = "PROXY TCP6 1::2:3 a:b:c:d:: 50000 80\r\n"; + const Buffer::OwnedImpl expectedBuff(expectedHeaderStr); + const auto src_addr = "1::2:3"; + const auto dst_addr = "a:b:c:d::"; + const auto src_port = 50000; + const auto dst_port = 80; + const auto version = Network::Address::IpVersion::v6; + Buffer::OwnedImpl buff{}; + + generateV1Header(src_addr, dst_addr, src_port, dst_port, version, buff); + + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); +} + +TEST(ProxyProtocolHeaderTest, GeneratesV2IPv4Header) { + const uint8_t v2_protocol[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x02, 0x01}; + const Buffer::OwnedImpl expectedBuff(v2_protocol, sizeof(v2_protocol)); + const auto src_addr = "1.2.3.4"; + const auto dst_addr = "0.1.1.2"; + const auto src_port = 773; + const auto dst_port = 513; + const auto version = Network::Address::IpVersion::v4; + Buffer::OwnedImpl buff{}; + + generateV2Header(src_addr, dst_addr, src_port, dst_port, version, buff); + + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); +} + 
+TEST(ProxyProtocolHeaderTest, GeneratesV2IPv6Header) { + const uint8_t v2_protocol[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, + 0x0a, 0x21, 0x21, 0x00, 0x24, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02}; + const Buffer::OwnedImpl expectedBuff(v2_protocol, sizeof(v2_protocol)); + const auto src_addr = "1:2:3::4"; + const auto dst_addr = "1:100:200:3::"; + const auto src_port = 8; + const auto dst_port = 2; + const auto version = Network::Address::IpVersion::v6; + Buffer::OwnedImpl buff{}; + + generateV2Header(src_addr, dst_addr, src_port, dst_port, version, buff); + + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); +} + +TEST(ProxyProtocolHeaderTest, GeneratesV2LocalHeader) { + const uint8_t v2_protocol[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, + 0x55, 0x49, 0x54, 0x0a, 0x20, 0x00, 0x00, 0x00}; + const Buffer::OwnedImpl expectedBuff(v2_protocol, sizeof(v2_protocol)); + Buffer::OwnedImpl buff{}; + + generateV2LocalHeader(buff); + + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); +} + +} // namespace +} // namespace ProxyProtocol +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc new file mode 100644 index 000000000000..25ed240099d5 --- /dev/null +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -0,0 +1,244 @@ +#include "envoy/network/address.h" + +#include "common/buffer/buffer_impl.h" +#include "common/event/dispatcher_impl.h" +#include "common/network/connection_balancer_impl.h" +#include "common/network/listen_socket_impl.h" + +#include "server/connection_handler_impl.h" + +#include 
"extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" + +#include "test/mocks/buffer/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Invoke; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace ProxyProtocol { +namespace { + +/** + * Regression tests for testing that the PROXY protocol listener filter can correctly read + * what the PROXY protocol util functions generate + */ +class ProxyProtocolRegressionTest : public testing::TestWithParam, + public Network::ListenerConfig, + public Network::FilterChainManager, + protected Logger::Loggable { +public: + ProxyProtocolRegressionTest() + : api_(Api::createApiForTest(stats_store_)), dispatcher_(api_->allocateDispatcher()), + socket_(std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)), + connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, "test_thread")), + name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) { + EXPECT_CALL(socket_factory_, socketType()) + .WillOnce(Return(Network::Address::SocketType::Stream)); + EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress())); + EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); + connection_handler_->addListener(*this); + conn_ = dispatcher_->createClientConnection(socket_->localAddress(), + Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + conn_->addConnectionCallbacks(connection_callbacks_); + } + + // Network::ListenerConfig + Network::FilterChainManager& 
filterChainManager() override { return *this; } + Network::FilterChainFactory& filterChainFactory() override { return factory_; } + Network::ListenSocketFactory& listenSocketFactory() override { return socket_factory_; } + bool bindToPort() override { return true; } + bool handOffRestoredDestinationConnections() const override { return false; } + uint32_t perConnectionBufferLimitBytes() const override { return 0; } + std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; } + bool continueOnListenerFiltersTimeout() const override { return false; } + Stats::Scope& listenerScope() override { return stats_store_; } + uint64_t listenerTag() const override { return 1; } + const std::string& name() const override { return name_; } + Network::ActiveUdpListenerFactory* udpListenerFactory() override { return nullptr; } + envoy::config::core::v3::TrafficDirection direction() const override { + return envoy::config::core::v3::UNSPECIFIED; + } + Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } + const std::vector& accessLogs() const override { + return empty_access_logs_; + } + + // Network::FilterChainManager + const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override { + return filter_chain_.get(); + } + + void connect(bool read = true) { + int expected_callbacks = 2; + auto maybeExitDispatcher = [&]() -> void { + expected_callbacks--; + if (expected_callbacks == 0) { + dispatcher_->exit(); + } + }; + + EXPECT_CALL(factory_, createListenerFilterChain(_)) + .WillOnce(Invoke([&](Network::ListenerFilterManager& filter_manager) -> bool { + filter_manager.addAcceptFilter( + nullptr, + std::make_unique( + std::make_shared(listenerScope()))); + maybeExitDispatcher(); + return true; + })); + conn_->connect(); + if (read) { + read_filter_ = std::make_shared>(); + EXPECT_CALL(factory_, createNetworkFilterChain(_, _)) + .WillOnce(Invoke([&](Network::Connection& connection, + const 
std::vector&) -> bool { + server_connection_ = &connection; + connection.addConnectionCallbacks(server_callbacks_); + connection.addReadFilter(read_filter_); + return true; + })); + } + EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)) + .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { maybeExitDispatcher(); })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + void write(const uint8_t* s, ssize_t l) { + Buffer::OwnedImpl buf(s, l); + conn_->write(buf, false); + } + + void write(const std::string& s) { + Buffer::OwnedImpl buf(s); + conn_->write(buf, false); + } + + void expectData(std::string expected) { + EXPECT_CALL(*read_filter_, onNewConnection()); + EXPECT_CALL(*read_filter_, onData(_, _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> Network::FilterStatus { + EXPECT_EQ(buffer.toString(), expected); + buffer.drain(expected.length()); + dispatcher_->exit(); + return Network::FilterStatus::Continue; + })); + + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + void disconnect() { + EXPECT_CALL(connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + EXPECT_CALL(server_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)) + .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + + conn_->close(Network::ConnectionCloseType::NoFlush); + + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + Stats::IsolatedStoreImpl stats_store_; + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + std::shared_ptr socket_; + Network::MockListenSocketFactory socket_factory_; + Network::NopConnectionBalancerImpl connection_balancer_; + Network::ConnectionHandlerPtr connection_handler_; + Network::MockFilterChainFactory factory_; + Network::ClientConnectionPtr conn_; + NiceMock connection_callbacks_; + Network::Connection* server_connection_; + Network::MockConnectionCallbacks server_callbacks_; + std::shared_ptr read_filter_; + std::string name_; + 
const Network::FilterChainSharedPtr filter_chain_; + const std::vector empty_access_logs_; +}; + +// Parameterize the listener socket address version. +INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtocolRegressionTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(ProxyProtocolRegressionTest, V1Basic) { + std::string source_addr; + Buffer::OwnedImpl buff{}; + if (GetParam() == Network::Address::IpVersion::v4) { + source_addr = "202.168.0.13"; + generateV1Header(source_addr, "174.2.2.222", 52000, 80, Network::Address::IpVersion::v4, buff); + } else { + source_addr = "1:2:3::4"; + generateV1Header(source_addr, "5:6::7:8", 62000, 8000, Network::Address::IpVersion::v6, buff); + } + connect(); + + write(buff.toString() + "more data"); + + expectData("more data"); + + EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), source_addr); + EXPECT_TRUE(server_connection_->localAddressRestored()); + + disconnect(); +} + +TEST_P(ProxyProtocolRegressionTest, V2Basic) { + std::string source_addr; + Buffer::OwnedImpl buff{}; + if (GetParam() == Network::Address::IpVersion::v4) { + source_addr = "202.168.0.13"; + generateV2Header(source_addr, "174.2.2.222", 52000, 80, Network::Address::IpVersion::v4, buff); + } else { + source_addr = "1:2:3::4"; + generateV2Header(source_addr, "5:6::7:8", 62000, 8000, Network::Address::IpVersion::v6, buff); + } + connect(); + + write(buff.toString() + "more data"); + + expectData("more data"); + + EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), source_addr); + EXPECT_TRUE(server_connection_->localAddressRestored()); + + disconnect(); +} + +TEST_P(ProxyProtocolRegressionTest, V2LocalConnection) { + Buffer::OwnedImpl buff{}; + generateV2LocalHeader(buff); + connect(); + + write(buff.toString() + "more data"); + + expectData("more data"); + + if (GetParam() == Envoy::Network::Address::IpVersion::v4) { + 
EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), "127.0.0.1"); + } else { + EXPECT_EQ(server_connection_->remoteAddress()->ip()->addressAsString(), "::1"); + } + EXPECT_FALSE(server_connection_->localAddressRestored()); + + disconnect(); +} + +} // namespace +} // namespace ProxyProtocol +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/common/wasm/test_data/test_rust.wasm b/test/extensions/common/wasm/test_data/test_rust.wasm index b63cdc65132a..268afbc722db 100755 Binary files a/test/extensions/common/wasm/test_data/test_rust.wasm and b/test/extensions/common/wasm/test_data/test_rust.wasm differ diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc index 03b72329683b..7e76c698f88c 100644 --- a/test/extensions/common/wasm/wasm_test.cc +++ b/test/extensions/common/wasm/wasm_test.cc @@ -531,6 +531,8 @@ TEST_P(WasmCommonTest, RemoteCode) { vm_config.mutable_code()->mutable_remote()->mutable_http_uri()->mutable_timeout()->set_seconds(5); WasmHandleSharedPtr wasm_handle; auto root_context = new Extensions::Common::Wasm::TestContext(); + NiceMock client; + NiceMock request(&client); EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_vm_start vm_cache"))); EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_done logging"))); @@ -546,7 +548,7 @@ TEST_P(WasmCommonTest, RemoteCode) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique<::Envoy::Buffer::OwnedImpl>(code); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request, std::move(response)); return nullptr; })); @@ -619,6 +621,8 @@ TEST_P(WasmCommonTest, RemoteCodeMultipleRetry) { ->set_value(num_retries); WasmHandleSharedPtr wasm_handle; auto root_context = new Extensions::Common::Wasm::TestContext(); + NiceMock client; + NiceMock request(&client); 
EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_vm_start vm_cache"))); EXPECT_CALL(*root_context, log_(spdlog::level::info, Eq("on_done logging"))); @@ -634,13 +638,13 @@ TEST_P(WasmCommonTest, RemoteCodeMultipleRetry) { if (retry-- == 0) { Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request, std::move(response)); return nullptr; } else { Http::ResponseMessagePtr response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique<::Envoy::Buffer::OwnedImpl>(code); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request, std::move(response)); return nullptr; } })); diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc index 7630fe158b4b..17bfd14856f7 100644 --- a/test/extensions/common/wasm/wasm_vm_test.cc +++ b/test/extensions/common/wasm/wasm_vm_test.cc @@ -107,10 +107,10 @@ MockHostFunctions* g_host_functions; void pong(void*, Word value) { g_host_functions->pong(convertWordToUint32(value)); } -Word random(void*) { return Word(g_host_functions->random()); } +Word random(void*) { return {g_host_functions->random()}; } // pong() with wrong number of arguments. -void bad_pong1(void*) { return; } +void bad_pong1(void*) {} // pong() with wrong return type. 
Word bad_pong2(void*, Word) { return 2; } diff --git a/test/extensions/filters/common/ext_authz/BUILD b/test/extensions/filters/common/ext_authz/BUILD index f6a977372a5c..c43c822f14b1 100644 --- a/test/extensions/filters/common/ext_authz/BUILD +++ b/test/extensions/filters/common/ext_authz/BUILD @@ -44,6 +44,7 @@ envoy_cc_test( deps = [ "//source/extensions/filters/common/ext_authz:ext_authz_http_lib", "//test/extensions/filters/common/ext_authz:ext_authz_test_common", + "//test/mocks/stream_info:stream_info_mocks", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 93d7d0740124..7a4137437ca1 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -11,6 +11,7 @@ #include "test/extensions/filters/common/ext_authz/mocks.h" #include "test/extensions/filters/common/ext_authz/test_common.h" #include "test/mocks/grpc/mocks.h" +#include "test/mocks/stream_info/mocks.h" #include "test/mocks/upstream/mocks.h" #include "gmock/gmock.h" @@ -64,6 +65,7 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { MockRequestCallbacks request_callbacks_; Tracing::MockSpan span_; bool use_alpha_{}; + NiceMock stream_info_; }; INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, Values(true, false)); @@ -80,7 +82,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); Http::RequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -104,7 +106,7 @@ TEST_P(ExtAuthzGrpcClientTest, 
AuthorizationOkWithAllAtributes) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); Http::RequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -127,7 +129,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); Http::RequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -151,7 +153,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); Http::RequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -178,7 +180,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); Http::RequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); @@ -196,7 +198,7 @@ TEST_P(ExtAuthzGrpcClientTest, UnknownError) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); @@ 
-209,7 +211,7 @@ TEST_P(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) { envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(*async_client_, sendRaw(_, _, _, _, _, _)).WillOnce(Return(&async_request_)); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); EXPECT_CALL(async_request_, cancel()); client_->cancel(); @@ -221,7 +223,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationRequestTimeout) { envoy::service::auth::v3::CheckRequest request; expectCallSend(request); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index e102d2923f00..9b075d3bbf29 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -11,6 +11,7 @@ #include "test/extensions/filters/common/ext_authz/mocks.h" #include "test/extensions/filters/common/ext_authz/test_common.h" +#include "test/mocks/stream_info/mocks.h" #include "test/mocks/upstream/mocks.h" #include "gmock/gmock.h" @@ -116,10 +117,10 @@ class ExtAuthzHttpClientTest : public testing::Test { const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - 
client_->onSuccess(std::move(check_response)); + client_->onSuccess(async_request_, std::move(check_response)); return message_ptr; } @@ -132,6 +133,7 @@ class ExtAuthzHttpClientTest : public testing::Test { std::unique_ptr client_; MockRequestCallbacks request_callbacks_; Tracing::MockSpan active_span_; + NiceMock stream_info_; }; // Test HTTP client config default values. @@ -293,16 +295,18 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOk) { setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); EXPECT_CALL(*child_span, finishSpan()); - client_->onSuccess(std::move(check_response)); + client_->onSuccess(async_request_, std::move(check_response)); } +using HeaderValuePair = std::pair; + // Verify client response headers when authorization_headers_to_add is configured. TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { Tracing::MockSpan* child_span{new Tracing::MockSpan()}; @@ -318,18 +322,69 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - // Expect that header1 will be added and header2 correctly overwritten. - EXPECT_CALL(async_client_, send_(AllOf(ContainsPairAsHeader(config_->headersToAdd().front()), - ContainsPairAsHeader(config_->headersToAdd().back())), - _, _)); - client_->check(request_callbacks_, request, active_span_); + // Expect that header1 will be added and header2 correctly overwritten. 
Due to this behavior, the + // append property of header value option should always be false. + const HeaderValuePair header1{"x-authz-header1", "value"}; + const HeaderValuePair header2{"x-authz-header2", "value"}; + EXPECT_CALL(async_client_, + send_(AllOf(ContainsPairAsHeader(header1), ContainsPairAsHeader(header2)), _, _)); + client_->check(request_callbacks_, request, active_span_, stream_info_); + + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); + EXPECT_CALL(*child_span, finishSpan()); + client_->onSuccess(async_request_, std::move(check_response)); +} + +// Verify client response headers when authorization_headers_to_add is configured with value from +// stream info. +TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInfo) { + const std::string yaml = R"EOF( + http_service: + server_uri: + uri: "ext_authz:9000" + cluster: "ext_authz" + timeout: 0.25s + authorization_request: + headers_to_add: + - key: "x-authz-header1" + value: "%REQ(x-request-id)%" + failure_mode_allow: true + )EOF"; + + initialize(yaml); + + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; + const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); + const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); + auto check_response = TestCommon::makeMessageResponse(expected_headers); + + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); + + const HeaderValuePair expected_header{"x-authz-header1", "123"}; + EXPECT_CALL(async_client_, send_(ContainsPairAsHeader(expected_header), _, _)); + + 
Http::RequestHeaderMapImpl request_headers; + request_headers.addCopy(Http::LowerCaseString(std::string("x-request-id")), + expected_header.second); + + StreamInfo::MockStreamInfo stream_info; + EXPECT_CALL(stream_info, getRequestHeaders()).WillOnce(Return(&request_headers)); + + envoy::service::auth::v3::CheckRequest request; + client_->check(request_callbacks_, request, active_span_, stream_info); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); EXPECT_CALL(*child_span, finishSpan()); - client_->onSuccess(std::move(check_response)); + client_->onSuccess(async_request_, std::move(check_response)); } // Verify client response headers when allow_upstream_headers is configured. @@ -348,7 +403,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}, @@ -363,7 +418,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); EXPECT_CALL(*child_span, finishSpan()); auto message_response = TestCommon::makeMessageResponse(check_response_headers); - client_->onSuccess(std::move(message_response)); + client_->onSuccess(async_request_, std::move(message_response)); } // Test the client when a denied response is received. 
@@ -378,14 +433,14 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Forbidden"))); EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - client_->onSuccess(TestCommon::makeMessageResponse(expected_headers)); + client_->onSuccess(async_request_, TestCommon::makeMessageResponse(expected_headers)); } // Verify client response headers and body when the authorization server denies the request. @@ -403,14 +458,15 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { EXPECT_CALL(*child_span, injectContext(_)); envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - client_->onSuccess(TestCommon::makeMessageResponse(expected_headers, expected_body)); + client_->onSuccess(async_request_, + TestCommon::makeMessageResponse(expected_headers, expected_body)); } // Verify client response headers when the authorization server denies the request and @@ -429,7 +485,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { EXPECT_CALL(*child_span, injectContext(_)); 
envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); @@ -439,7 +495,8 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { {"x-foo", "bar", false}, {":status", "401", false}, {"foo", "bar", false}}); - client_->onSuccess(TestCommon::makeMessageResponse(check_response_headers, expected_body)); + client_->onSuccess(async_request_, + TestCommon::makeMessageResponse(check_response_headers, expected_body)); } // Test the client when an unknown error occurs. @@ -452,13 +509,13 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); - client_->onFailure(Http::AsyncClient::FailureReason::Reset); + client_->onFailure(async_request_, Http::AsyncClient::FailureReason::Reset); } // Test the client when a call to authorization server returns a 5xx error status. 
@@ -473,13 +530,13 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) { setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Service Unavailable"))); EXPECT_CALL(*child_span, finishSpan()); - client_->onSuccess(std::move(check_response)); + client_->onSuccess(async_request_, std::move(check_response)); } // Test the client when a call to authorization server returns a status code that cannot be @@ -495,13 +552,13 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestErrorParsingStatusCode) { setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); - client_->onSuccess(std::move(check_response)); + client_->onSuccess(async_request_, std::move(check_response)); } // Test the client when the request is canceled. 
@@ -514,7 +571,7 @@ TEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) { setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(*child_span, injectContext(_)); EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); - client_->check(request_callbacks_, request, active_span_); + client_->check(request_callbacks_, request, active_span_, stream_info_); EXPECT_CALL(async_request_, cancel()); EXPECT_CALL(*child_span, @@ -537,7 +594,8 @@ TEST_F(ExtAuthzHttpClientTest, NoCluster) { onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); - client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, active_span_); + client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, active_span_, + stream_info_); } } // namespace diff --git a/test/extensions/filters/common/ext_authz/mocks.h b/test/extensions/filters/common/ext_authz/mocks.h index a3379bf39a11..900d64d7d0fd 100644 --- a/test/extensions/filters/common/ext_authz/mocks.h +++ b/test/extensions/filters/common/ext_authz/mocks.h @@ -24,7 +24,7 @@ class MockClient : public Client { MOCK_METHOD(void, cancel, ()); MOCK_METHOD(void, check, (RequestCallbacks & callbacks, const envoy::service::auth::v3::CheckRequest& request, - Tracing::Span& parent_span)); + Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info)); }; class MockRequestCallbacks : public RequestCallbacks { diff --git a/test/extensions/filters/common/rbac/BUILD b/test/extensions/filters/common/rbac/BUILD index de24b68a22b3..6454c69e159f 100644 --- a/test/extensions/filters/common/rbac/BUILD +++ b/test/extensions/filters/common/rbac/BUILD @@ -17,6 +17,7 @@ envoy_extension_cc_test( srcs = ["matchers_test.cc"], extension_name = "envoy.filters.http.rbac", deps = [ + 
"//source/extensions/filters/common/expr:evaluator_lib", "//source/extensions/filters/common/rbac:matchers_lib", "//test/mocks/network:network_mocks", "//test/mocks/ssl:ssl_mocks", diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 302bcf8cfe37..42306d4bc7f4 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -24,15 +24,11 @@ namespace Common { namespace RBAC { namespace { -void checkEngine( - const RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, - const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), - const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), - const envoy::config::core::v3::Metadata& metadata = envoy::config::core::v3::Metadata(), - std::string* policy_id = nullptr) { - NiceMock info; - EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - EXPECT_EQ(expected, engine.allowed(connection, headers, info, policy_id)); +void checkEngine(const RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, + const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), + const StreamInfo::StreamInfo& info = NiceMock()) { + EXPECT_EQ(expected, engine.allowed(connection, headers, info, nullptr)); } TEST(RoleBasedAccessControlEngineImpl, Disabled) { @@ -141,14 +137,16 @@ TEST(RoleBasedAccessControlEngineImpl, AllowedWhitelist) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, true, conn); + 
EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, true, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, false, conn, headers, info); } TEST(RoleBasedAccessControlEngineImpl, DeniedBlacklist) { @@ -162,14 +160,16 @@ TEST(RoleBasedAccessControlEngineImpl, DeniedBlacklist) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, false, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, true, conn); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, true, conn, headers, info); } TEST(RoleBasedAccessControlEngineImpl, BasicCondition) { @@ -322,13 +322,15 @@ TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; auto label = MessageUtil::keyValueStruct("label", "prod"); envoy::config::core::v3::Metadata metadata; metadata.mutable_filter_metadata()->insert( Protobuf::MapPair("other", label)); + EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - checkEngine(engine, true, Envoy::Network::MockConnection(), headers, metadata); + 
checkEngine(engine, true, Envoy::Network::MockConnection(), headers, info); } TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { @@ -347,10 +349,12 @@ TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, false, conn, headers, info); } } // namespace diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc index fa640aeca430..7aa0456585dc 100644 --- a/test/extensions/filters/common/rbac/matchers_test.cc +++ b/test/extensions/filters/common/rbac/matchers_test.cc @@ -6,6 +6,7 @@ #include "common/network/utility.h" +#include "extensions/filters/common/expr/evaluator.h" #include "extensions/filters/common/rbac/matchers.h" #include "test/mocks/network/mocks.h" @@ -29,9 +30,7 @@ void checkMatcher( const RBAC::Matcher& matcher, bool expected, const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), - const envoy::config::core::v3::Metadata& metadata = envoy::config::core::v3::Metadata()) { - NiceMock info; - EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); + const StreamInfo::StreamInfo& info = NiceMock()) { EXPECT_EQ(expected, matcher.matches(connection, headers, info)); } @@ -48,16 +47,18 @@ TEST(AndMatcher, Permission_Set) { perm->set_destination_port(123); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; 
Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkMatcher(RBAC::AndMatcher(set), true, conn); + checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 8080, false); - EXPECT_CALL(conn, localAddress()).WillOnce(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkMatcher(RBAC::AndMatcher(set), false, conn); + checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info); } TEST(AndMatcher, Principal_Set) { @@ -68,21 +69,23 @@ TEST(AndMatcher, Principal_Set) { checkMatcher(RBAC::AndMatcher(set), true); principal = set.add_ids(); - auto* cidr = principal->mutable_source_ip(); + auto* cidr = principal->mutable_direct_remote_ip(); cidr->set_address_prefix("1.2.3.0"); cidr->mutable_prefix_len()->set_value(24); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - EXPECT_CALL(conn, remoteAddress()).WillOnce(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamDirectRemoteAddress()).WillOnce(ReturnRef(addr)); - checkMatcher(RBAC::AndMatcher(set), true, conn); + checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.4.6", 123, false); - EXPECT_CALL(conn, remoteAddress()).WillOnce(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamDirectRemoteAddress()).WillOnce(ReturnRef(addr)); - checkMatcher(RBAC::AndMatcher(set), false, conn); + checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info); } TEST(OrMatcher, Permission_Set) { @@ -91,36 +94,42 @@ TEST(OrMatcher, Permission_Set) { 
perm->set_destination_port(123); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); - checkMatcher(RBAC::OrMatcher(set), false, conn); + checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info); perm = set.add_rules(); perm->set_any(true); - checkMatcher(RBAC::OrMatcher(set), true, conn); + checkMatcher(RBAC::OrMatcher(set), true, conn, headers, info); } TEST(OrMatcher, Principal_Set) { envoy::config::rbac::v3::Principal::Set set; envoy::config::rbac::v3::Principal* id = set.add_ids(); - auto* cidr = id->mutable_source_ip(); + auto* cidr = id->mutable_direct_remote_ip(); cidr->set_address_prefix("1.2.3.0"); cidr->mutable_prefix_len()->set_value(24); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.4.6", 456, false); - EXPECT_CALL(conn, remoteAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamDirectRemoteAddress()) + .Times(2) + .WillRepeatedly(ReturnRef(addr)); - checkMatcher(RBAC::OrMatcher(set), false, conn); + checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info); id = set.add_ids(); id->set_any(true); - checkMatcher(RBAC::OrMatcher(set), true, conn); + checkMatcher(RBAC::OrMatcher(set), true, conn, headers, info); } TEST(NotMatcher, Permission) { @@ -160,39 +169,77 @@ TEST(HeaderMatcher, HeaderMatcher) { TEST(IPMatcher, IPMatcher) { Envoy::Network::MockConnection conn; - Envoy::Network::Address::InstanceConstSharedPtr local = + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; + 
Envoy::Network::Address::InstanceConstSharedPtr connectionRemote = + Envoy::Network::Utility::parseInternetAddress("12.13.14.15", 789, false); + Envoy::Network::Address::InstanceConstSharedPtr directLocal = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - Envoy::Network::Address::InstanceConstSharedPtr remote = + Envoy::Network::Address::InstanceConstSharedPtr directRemote = Envoy::Network::Utility::parseInternetAddress("4.5.6.7", 456, false); - EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(local)); - EXPECT_CALL(conn, remoteAddress()).Times(2).WillRepeatedly(ReturnRef(remote)); - - envoy::config::core::v3::CidrRange local_cidr; - local_cidr.set_address_prefix("1.2.3.0"); - local_cidr.mutable_prefix_len()->set_value(24); - - envoy::config::core::v3::CidrRange remote_cidr; - remote_cidr.set_address_prefix("4.5.6.7"); - remote_cidr.mutable_prefix_len()->set_value(32); - - checkMatcher(IPMatcher(local_cidr, true), true, conn); - checkMatcher(IPMatcher(remote_cidr, false), true, conn); - - local_cidr.set_address_prefix("1.2.4.8"); - remote_cidr.set_address_prefix("4.5.6.0"); - - checkMatcher(IPMatcher(local_cidr, true), false, conn); - checkMatcher(IPMatcher(remote_cidr, false), false, conn); + Envoy::Network::Address::InstanceConstSharedPtr downstreamRemote = + Envoy::Network::Utility::parseInternetAddress("8.9.10.11", 456, false); + EXPECT_CALL(conn, remoteAddress()).Times(2).WillRepeatedly(ReturnRef(connectionRemote)); + EXPECT_CALL(Const(info), downstreamLocalAddress()) + .Times(2) + .WillRepeatedly(ReturnRef(directLocal)); + EXPECT_CALL(Const(info), downstreamDirectRemoteAddress()) + .Times(2) + .WillRepeatedly(ReturnRef(directRemote)); + EXPECT_CALL(Const(info), downstreamRemoteAddress()) + .Times(2) + .WillRepeatedly(ReturnRef(downstreamRemote)); + + envoy::config::core::v3::CidrRange connection_remote_cidr; + connection_remote_cidr.set_address_prefix("12.13.14.15"); + 
connection_remote_cidr.mutable_prefix_len()->set_value(32); + + envoy::config::core::v3::CidrRange downstream_local_cidr; + downstream_local_cidr.set_address_prefix("1.2.3.0"); + downstream_local_cidr.mutable_prefix_len()->set_value(24); + + envoy::config::core::v3::CidrRange downstream_direct_remote_cidr; + downstream_direct_remote_cidr.set_address_prefix("4.5.6.7"); + downstream_direct_remote_cidr.mutable_prefix_len()->set_value(32); + + envoy::config::core::v3::CidrRange downstream_remote_cidr; + downstream_remote_cidr.set_address_prefix("8.9.10.11"); + downstream_remote_cidr.mutable_prefix_len()->set_value(32); + + checkMatcher(IPMatcher(connection_remote_cidr, IPMatcher::Type::ConnectionRemote), true, conn, + headers, info); + checkMatcher(IPMatcher(downstream_local_cidr, IPMatcher::Type::DownstreamLocal), true, conn, + headers, info); + checkMatcher(IPMatcher(downstream_direct_remote_cidr, IPMatcher::Type::DownstreamDirectRemote), + true, conn, headers, info); + checkMatcher(IPMatcher(downstream_remote_cidr, IPMatcher::Type::DownstreamRemote), true, conn, + headers, info); + + connection_remote_cidr.set_address_prefix("4.5.6.7"); + downstream_local_cidr.set_address_prefix("1.2.4.8"); + downstream_direct_remote_cidr.set_address_prefix("4.5.6.0"); + downstream_remote_cidr.set_address_prefix("4.5.6.7"); + + checkMatcher(IPMatcher(connection_remote_cidr, IPMatcher::Type::ConnectionRemote), false, conn, + headers, info); + checkMatcher(IPMatcher(downstream_local_cidr, IPMatcher::Type::DownstreamLocal), false, conn, + headers, info); + checkMatcher(IPMatcher(downstream_direct_remote_cidr, IPMatcher::Type::DownstreamDirectRemote), + false, conn, headers, info); + checkMatcher(IPMatcher(downstream_remote_cidr, IPMatcher::Type::DownstreamRemote), false, conn, + headers, info); } TEST(PortMatcher, PortMatcher) { Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = 
Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); - checkMatcher(PortMatcher(123), true, conn); - checkMatcher(PortMatcher(456), false, conn); + checkMatcher(PortMatcher(123), true, conn, headers, info); + checkMatcher(PortMatcher(456), false, conn, headers, info); } TEST(AuthenticatedMatcher, uriSanPeerCertificate) { @@ -288,6 +335,7 @@ TEST(AuthenticatedMatcher, NoSSL) { TEST(MetadataMatcher, MetadataMatcher) { Envoy::Network::MockConnection conn; Envoy::Http::RequestHeaderMapImpl header; + NiceMock info; auto label = MessageUtil::keyValueStruct("label", "prod"); envoy::config::core::v3::Metadata metadata; @@ -295,15 +343,16 @@ TEST(MetadataMatcher, MetadataMatcher) { Protobuf::MapPair("other", label)); metadata.mutable_filter_metadata()->insert( Protobuf::MapPair("rbac", label)); + EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); envoy::type::matcher::v3::MetadataMatcher matcher; matcher.set_filter("rbac"); matcher.add_path()->set_key("label"); matcher.mutable_value()->mutable_string_match()->set_exact("test"); - checkMatcher(MetadataMatcher(matcher), false, conn, header, metadata); + checkMatcher(MetadataMatcher(matcher), false, conn, header, info); matcher.mutable_value()->mutable_string_match()->set_exact("prod"); - checkMatcher(MetadataMatcher(matcher), true, conn, header, metadata); + checkMatcher(MetadataMatcher(matcher), true, conn, header, info); } TEST(PolicyMatcher, PolicyMatcher) { @@ -312,10 +361,13 @@ TEST(PolicyMatcher, PolicyMatcher) { policy.add_permissions()->set_destination_port(456); policy.add_principals()->mutable_authenticated()->mutable_principal_name()->set_exact("foo"); policy.add_principals()->mutable_authenticated()->mutable_principal_name()->set_exact("bar"); + Expr::BuilderPtr builder = 
Expr::createBuilder(nullptr); - RBAC::PolicyMatcher matcher(policy, nullptr); + RBAC::PolicyMatcher matcher(policy, builder.get()); Envoy::Network::MockConnection conn; + Envoy::Http::RequestHeaderMapImpl headers; + NiceMock info; auto ssl = std::make_shared(); Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); @@ -327,19 +379,19 @@ TEST(PolicyMatcher, PolicyMatcher) { EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef("subject")); EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(ssl)); - EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); - checkMatcher(matcher, true, conn); + checkMatcher(matcher, true, conn, headers, info); EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(nullptr)); - EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); - checkMatcher(matcher, false, conn); + checkMatcher(matcher, false, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 789, false); - EXPECT_CALL(conn, localAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(2).WillRepeatedly(ReturnRef(addr)); - checkMatcher(matcher, false, conn); + checkMatcher(matcher, false, conn, headers, info); } TEST(RequestedServerNameMatcher, ValidRequestedServerName) { diff --git a/test/extensions/filters/http/adaptive_concurrency/BUILD b/test/extensions/filters/http/adaptive_concurrency/BUILD index b0ba1a1dbdd2..fbe81cc26327 100644 --- a/test/extensions/filters/http/adaptive_concurrency/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/common/http:header_map_lib", 
"//source/common/http:headers_lib", "//source/extensions/filters/http/adaptive_concurrency:adaptive_concurrency_filter_lib", - "//source/extensions/filters/http/adaptive_concurrency/concurrency_controller:concurrency_controller_lib", + "//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib", "//test/mocks/http:http_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc index a198000c8b15..af586eda4dc2 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc @@ -4,7 +4,7 @@ #include "envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.validate.h" #include "extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/controller.h" #include "test/mocks/http/mocks.h" #include "test/mocks/stream_info/mocks.h" @@ -23,9 +23,9 @@ namespace HttpFilters { namespace AdaptiveConcurrency { namespace { -using ConcurrencyController::RequestForwardingAction; +using Controller::RequestForwardingAction; -class MockConcurrencyController : public ConcurrencyController::ConcurrencyController { +class MockConcurrencyController : public Controller::ConcurrencyController { public: MOCK_METHOD(RequestForwardingAction, forwardingDecision, ()); MOCK_METHOD(void, cancelLatencySample, ()); diff --git a/test/extensions/filters/http/adaptive_concurrency/concurrency_controller/BUILD b/test/extensions/filters/http/adaptive_concurrency/controller/BUILD similarity index 94% rename from 
test/extensions/filters/http/adaptive_concurrency/concurrency_controller/BUILD rename to test/extensions/filters/http/adaptive_concurrency/controller/BUILD index d9b86dfe1c69..94592fb47acf 100644 --- a/test/extensions/filters/http/adaptive_concurrency/concurrency_controller/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -18,7 +18,7 @@ envoy_extension_cc_test( deps = [ "//source/common/stats:isolated_store_lib", "//source/extensions/filters/http/adaptive_concurrency:adaptive_concurrency_filter_lib", - "//source/extensions/filters/http/adaptive_concurrency/concurrency_controller:concurrency_controller_lib", + "//source/extensions/filters/http/adaptive_concurrency/controller:controller_lib", "//test/mocks/event:event_mocks", "//test/mocks/runtime:runtime_mocks", "//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller_test.cc b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc similarity index 89% rename from test/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller_test.cc rename to test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc index a7e02b3a8ea1..447d9627f9f1 100644 --- a/test/extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc @@ -7,9 +7,10 @@ #include "common/stats/isolated_store_impl.h" #include "extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/concurrency_controller.h" -#include "extensions/filters/http/adaptive_concurrency/concurrency_controller/gradient_controller.h" +#include "extensions/filters/http/adaptive_concurrency/controller/controller.h" +#include 
"extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -18,9 +19,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::AllOf; -using testing::Ge; -using testing::Le; using testing::NiceMock; using testing::Return; @@ -28,7 +26,7 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { namespace AdaptiveConcurrency { -namespace ConcurrencyController { +namespace Controller { namespace { GradientControllerConfig makeConfig(const std::string& yaml_config, @@ -77,7 +75,7 @@ class GradientControllerTest : public testing::Test { } Event::SimulatedTimeSystem time_system_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; NiceMock runtime_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; @@ -623,8 +621,67 @@ TEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) { } } +// Test that consecutively setting the concurrency limit to the minimum triggers a minRTT +// recalculation. +TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { + const std::string yaml = R"EOF( +sample_aggregate_percentile: + value: 50 +concurrency_limit_params: + max_concurrency_limit: + concurrency_update_interval: 0.1s +min_rtt_calc_params: + jitter: + value: 0.0 + interval: 3600s + request_count: 5 + buffer: + value: 0 + min_concurrency: 7 +)EOF"; + + auto controller = makeController(yaml); + EXPECT_EQ(controller->concurrencyLimit(), 7); + + // Force a minRTT of 5ms. + advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); + EXPECT_EQ( + 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + + // Ensure that the concurrency window increases on its own due to the headroom calculation with + // the max gradient. 
+ time_system_.sleep(std::chrono::milliseconds(101)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_GE(controller->concurrencyLimit(), 7); + EXPECT_LE(controller->concurrencyLimit() / 7.0, 2.0); + + // Make it seem as if the recorded latencies are consistently higher than the measured minRTT to + // induce a minRTT recalculation after 5 iterations. + const auto elevated_latency = std::chrono::milliseconds(10); + for (int recalcs = 0; recalcs < 5; ++recalcs) { + for (int i = 1; i <= 5; ++i) { + tryForward(controller, true); + controller->recordLatencySample(elevated_latency); + } + time_system_.sleep(std::chrono::milliseconds(101)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + } + + // Verify that the concurrency limit starts growing with newly measured minRTT. + for (int recalcs = 0; recalcs < 10; ++recalcs) { + const auto last_concurrency = controller->concurrencyLimit(); + for (int i = 1; i <= 5; ++i) { + tryForward(controller, true); + controller->recordLatencySample(elevated_latency); + } + time_system_.sleep(std::chrono::milliseconds(101)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_GE(controller->concurrencyLimit(), last_concurrency); + } +} + } // namespace -} // namespace ConcurrencyController +} // namespace Controller } // namespace AdaptiveConcurrency } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/aws_lambda/BUILD b/test/extensions/filters/http/aws_lambda/BUILD index fdcc88cbe5ef..312545218bfc 100644 --- a/test/extensions/filters/http/aws_lambda/BUILD +++ b/test/extensions/filters/http/aws_lambda/BUILD @@ -23,6 +23,19 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "aws_lambda_filter_integration_test", + srcs = ["aws_lambda_filter_integration_test.cc"], + extension_name = "envoy.filters.http.aws_lambda", + deps = [ + "//source/common/http:header_map_lib", + "//source/extensions/filters/http/aws_lambda:aws_lambda_filter_lib", + 
"//source/extensions/filters/http/aws_lambda:config", + "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", + ], +) + envoy_extension_cc_test( name = "arn_test", srcs = ["arn_test.cc"], diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc new file mode 100644 index 000000000000..b8e552a85283 --- /dev/null +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc @@ -0,0 +1,316 @@ +#include + +#include "source/extensions/filters/http/aws_lambda/request_response.pb.h" + +#include "test/integration/http_integration.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using source::extensions::filters::http::aws_lambda::Request; + +namespace Envoy { +namespace { + +class AwsLambdaFilterIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { +public: + AwsLambdaFilterIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} + + void SetUp() override { + // Set these environment variables to quickly sign credentials instead of attempting to query + // instance metadata and timing-out. 
+ TestEnvironment::setEnvVar("AWS_ACCESS_KEY_ID", "aws-user", 1 /*overwrite*/); + TestEnvironment::setEnvVar("AWS_SECRET_ACCESS_KEY", "secret", 1 /*overwrite*/); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); + } + + void TearDown() override { + test_server_.reset(); + fake_upstream_connection_.reset(); + fake_upstreams_.clear(); + } + + void setupLambdaFilter(bool passthrough) { + const std::string filter = + R"EOF( + name: envoy.filters.http.aws_lambda + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.aws_lambda.v3.Config + arn: "arn:aws:lambda:us-west-2:123456789:function:test" + payload_passthrough: {} + )EOF"; + config_helper_.addFilter(fmt::format(filter, passthrough)); + + constexpr auto metadata_yaml = R"EOF( + com.amazonaws.lambda: + egress_gateway: true + )EOF"; + config_helper_.addClusterFilterMetadata(metadata_yaml); + } + + template + ABSL_MUST_USE_RESULT testing::AssertionResult compareMaps(const TMap& m1, const TMap& m2) { + for (auto&& kvp : m1) { + auto it = m2.find(kvp.first); + if (it == m2.end()) { + return AssertionFailure() << "Failed to find value: " << kvp.first; + ; + } + if (it->second != kvp.second) { + return AssertionFailure() << "Values of key: " << kvp.first + << " are different. 
expected: " << kvp.second + << " actual: " << it->second; + } + } + return AssertionSuccess(); + } + + void runTest(const Http::RequestHeaderMap& request_headers, const std::string& request_body, + const std::string& expected_json_request, + const Http::ResponseHeaderMap& lambda_response_headers, + const std::string& lambda_response_body, + const Http::ResponseHeaderMap& expected_response_headers, + const std::vector& expected_response_cookies, + const std::string& expected_response_body) { + + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response; + if (request_body.empty()) { + response = codec_client_->makeHeaderOnlyRequest(request_headers); + } else { + auto encoder_decoder = codec_client_->startRequest(request_headers); + request_encoder_ = &encoder_decoder.first; + response = std::move(encoder_decoder.second); + // Chunk the data to simulate a real request. + const size_t chunk_size = 5; + size_t i = 0; + for (; i < request_body.length() / chunk_size; i++) { + Buffer::OwnedImpl buffer(request_body.substr(i * chunk_size, chunk_size)); + codec_client_->sendData(*request_encoder_, buffer, false); + } + // Send the last chunk flagged as end_stream. 
+ Buffer::OwnedImpl buffer( + request_body.substr(i * chunk_size, request_body.length() % chunk_size)); + codec_client_->sendData(*request_encoder_, buffer, true); + } + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + Request transformed_request; + Request expected_request; + TestUtility::loadFromJson(upstream_request_->body().toString(), transformed_request); + TestUtility::loadFromJson(expected_json_request, expected_request); + + EXPECT_EQ(expected_request.raw_path(), transformed_request.raw_path()); + EXPECT_EQ(expected_request.method(), transformed_request.method()); + EXPECT_EQ(expected_request.body(), transformed_request.body()); + EXPECT_EQ(expected_request.is_base64_encoded(), transformed_request.is_base64_encoded()); + EXPECT_TRUE(compareMaps(expected_request.headers(), transformed_request.headers())); + EXPECT_TRUE(compareMaps(expected_request.query_string_parameters(), + transformed_request.query_string_parameters())); + + if (lambda_response_body.empty()) { + upstream_request_->encodeHeaders(lambda_response_headers, true /*end_stream*/); + } else { + upstream_request_->encodeHeaders(lambda_response_headers, false /*end_stream*/); + Buffer::OwnedImpl buffer(lambda_response_body); + upstream_request_->encodeData(buffer, true); + } + + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + + // verify headers + expected_response_headers.iterate( + [](const Http::HeaderEntry& expected_entry, void* ctx) { + const auto* actual_headers = static_cast(ctx); + const auto* actual_entry = actual_headers->get( + Http::LowerCaseString(std::string(expected_entry.key().getStringView()))); + EXPECT_EQ(actual_entry->value().getStringView(), expected_entry.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }, + // Because 
headers() returns a pointer to const we have to cast it + // away to match the callback signature. This is safe because we do + // not call any non-const functions on the headers in the callback. + const_cast(&response->headers())); + + // verify cookies if we have any + if (!expected_response_cookies.empty()) { + std::vector actual_cookies; + response->headers().iterate( + [](const Http::HeaderEntry& entry, void* ctx) { + auto* list = static_cast*>(ctx); + if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { + list->emplace_back(entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }, + &actual_cookies); + + EXPECT_EQ(expected_response_cookies, actual_cookies); + } + + // verify body + EXPECT_STREQ(expected_response_body.c_str(), response->body().c_str()); + + // cleanup + codec_client_->close(); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AwsLambdaFilterIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AwsLambdaFilterIntegrationTest, JsonWrappedHeaderOnlyRequest) { + setupLambdaFilter(false /*passthrough*/); + HttpIntegrationTest::initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{":scheme", "http"}, + {":method", "GET"}, + {":path", "/resize?type=jpg"}, + {":authority", "host"}, + {"s3-location", "mybucket/images/123.jpg"}}; + constexpr auto expected_json_request = R"EOF( + { + "rawPath": "/resize?type=jpg", + "method": "GET", + "headers":{ "s3-location": "mybucket/images/123.jpg"}, + "queryStringParameters": {"type":"jpg"}, + "body": "", + "isBase64Encoded": false + } + )EOF"; + + const std::string lambda_response_body = R"EOF( + { + "body": "my-bucket/123-small.jpg", + "isBase64Encoded": false, + "statusCode": 200, + "cookies": ["user=John", "session-id=1337"], + "headers": {"x-amz-custom-header": 
"envoy,proxy"} + } + )EOF"; + + Http::TestResponseHeaderMapImpl lambda_response_headers{ + {":status", "201"}, + {"content-type", "application/json"}, + {"content-length", fmt::format("{}", lambda_response_body.length())}}; + + Http::TestResponseHeaderMapImpl expected_response_headers{{":status", "200"}, + {"content-type", "application/json"}, + {"x-amz-custom-header", "envoy,proxy"}}; + std::vector expected_response_cookies{"user=John", "session-id=1337"}; + constexpr auto expected_response_body = "my-bucket/123-small.jpg"; + runTest(request_headers, "" /*request_body*/, expected_json_request, lambda_response_headers, + lambda_response_body, expected_response_headers, expected_response_cookies, + expected_response_body); +} + +TEST_P(AwsLambdaFilterIntegrationTest, JsonWrappedPlainBody) { + setupLambdaFilter(false /*passthrough*/); + HttpIntegrationTest::initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{":scheme", "http"}, + {":method", "GET"}, + {":path", "/resize?type=jpg"}, + {":authority", "host"}, + {"content-type", "text/plain"}, + {"xray-trace-id", "qwerty12345"}}; + + constexpr auto request_body = "AWS Lambda is a FaaS platform"; + + constexpr auto expected_json_request = R"EOF( + { + "rawPath": "/resize?type=jpg", + "method": "GET", + "headers":{ "xray-trace-id": "qwerty12345"}, + "queryStringParameters": {"type":"jpg"}, + "body": "AWS Lambda is a FaaS platform", + "isBase64Encoded": false + } + )EOF"; + + const std::string lambda_response_body = R"EOF( + { + "body": "AWS Lambda is cheap!", + "isBase64Encoded": false, + "statusCode": 200, + "cookies": ["user=John", "session-id=1337"], + "headers": {"x-amz-custom-header": "envoy,proxy"} + } + )EOF"; + + Http::TestResponseHeaderMapImpl lambda_response_headers{ + {":status", "201"}, + {"content-type", "application/json"}, + {"content-length", fmt::format("{}", lambda_response_body.length())}}; + + Http::TestResponseHeaderMapImpl expected_response_headers{{":status", "200"}, + {"content-type", 
"application/json"}, + {"x-amz-custom-header", "envoy,proxy"}}; + std::vector expected_response_cookies{"user=John", "session-id=1337"}; + constexpr auto expected_response_body = "AWS Lambda is cheap!"; + runTest(request_headers, request_body, expected_json_request, lambda_response_headers, + lambda_response_body, expected_response_headers, expected_response_cookies, + expected_response_body); +} + +TEST_P(AwsLambdaFilterIntegrationTest, JsonWrappedBinaryBody) { + setupLambdaFilter(false /*passthrough*/); + HttpIntegrationTest::initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{":scheme", "http"}, + {":method", "GET"}, + {":path", "/resize?type=jpg"}, + {":authority", "host"}, + {"content-type", "application/octet-stream"}, + {"xray-trace-id", "qwerty12345"}}; + + constexpr auto request_body = "this should get base64 encoded"; + + constexpr auto expected_json_request = R"EOF( + { + "rawPath": "/resize?type=jpg", + "method": "GET", + "headers":{ "xray-trace-id": "qwerty12345"}, + "queryStringParameters": {"type":"jpg"}, + "body": "dGhpcyBzaG91bGQgZ2V0IGJhc2U2NCBlbmNvZGVk", + "isBase64Encoded": true + } + )EOF"; + + const std::string lambda_response_body = R"EOF( + { + "body": "QVdTIExhbWJkYSBpcyBjaGVhcCE=", + "isBase64Encoded": true, + "statusCode": 200, + "cookies": ["user=John", "session-id=1337"], + "headers": {"x-amz-custom-header": "envoy,proxy"} + } + )EOF"; + + Http::TestResponseHeaderMapImpl lambda_response_headers{ + {":status", "201"}, + {"content-type", "application/json"}, + {"content-length", fmt::format("{}", lambda_response_body.length())}}; + + Http::TestResponseHeaderMapImpl expected_response_headers{{":status", "200"}, + {"content-type", "application/json"}, + {"x-amz-custom-header", "envoy,proxy"}}; + std::vector expected_response_cookies{"user=John", "session-id=1337"}; + constexpr auto expected_response_body = "AWS Lambda is cheap!"; + runTest(request_headers, request_body, expected_json_request, lambda_response_headers, + 
lambda_response_body, expected_response_headers, expected_response_cookies, + expected_response_body); +} + +} // namespace +} // namespace Envoy diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index fc390631b742..0cc3b24f13c4 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -1,4 +1,9 @@ +#include + #include "envoy/config/core/v3/base.pb.h" +#include "envoy/http/filter.h" + +#include "source/extensions/filters/http/aws_lambda/request_response.pb.validate.h" #include "extensions/filters/http/aws_lambda/aws_lambda_filter.h" #include "extensions/filters/http/well_known_names.h" @@ -17,9 +22,14 @@ namespace AwsLambdaFilter { namespace { using Common::Aws::MockSigner; +using ::testing::An; +using ::testing::ElementsAre; +using ::testing::InSequence; using ::testing::Invoke; +using ::testing::Pair; using ::testing::Return; using ::testing::ReturnRef; +using ::testing::UnorderedElementsAre; constexpr auto Arn = "arn:aws:lambda:us-west-2:1337:function:fun"; class AwsLambdaFilterTest : public ::testing::Test { @@ -28,47 +38,71 @@ class AwsLambdaFilterTest : public ::testing::Test { signer_ = std::make_shared>(); filter_ = std::make_unique(settings, signer_); filter_->setDecoderFilterCallbacks(decoder_callbacks_); + filter_->setEncoderFilterCallbacks(encoder_callbacks_); + const std::string metadata_yaml = "egress_gateway: true"; + + ProtobufWkt::Struct cluster_metadata; + TestUtility::loadFromYaml(metadata_yaml, cluster_metadata); + metadata_.mutable_filter_metadata()->insert({"com.amazonaws.lambda", cluster_metadata}); + ON_CALL(*decoder_callbacks_.cluster_info_, metadata()).WillByDefault(ReturnRef(metadata_)); } std::unique_ptr filter_; std::shared_ptr> signer_; NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; + envoy::config::core::v3::Metadata metadata_; }; 
/** * Requests that are _not_ header only, should result in StopIteration. */ TEST_F(AwsLambdaFilterTest, DecodingHeaderStopIteration) { - setupFilter({Arn, true /*passthrough*/}); + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); Http::TestRequestHeaderMapImpl headers; const auto result = filter_->decodeHeaders(headers, false /*end_stream*/); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result); } /** - * Header only requests should be signed and Continue iteration. - * Also, if x-forwarded-proto header is found, it should be removed when signing. + * Header only pass-through requests should be signed and Continue iteration. */ TEST_F(AwsLambdaFilterTest, HeaderOnlyShouldContinue) { - setupFilter({Arn, true /*passthrough*/}); - Http::TestRequestHeaderMapImpl input_headers{{":method", "GET"}, {"x-forwarded-proto", "http"}}; + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + EXPECT_CALL(*signer_, sign(_)); + Http::TestRequestHeaderMapImpl input_headers; const auto result = filter_->decodeHeaders(input_headers, true /*end_stream*/); EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); + + Http::TestResponseHeaderMapImpl response_headers; + const auto encode_result = filter_->encodeHeaders(response_headers, true /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, encode_result); +} + +/** + * If the filter is configured with an invalid ARN, then we stop. + */ +TEST_F(AwsLambdaFilterTest, ConfigurationWithInvalidARN) { + setupFilter({"BadARN", InvocationMode::Synchronous, true /*passthrough*/}); + EXPECT_CALL(decoder_callbacks_, sendLocalReply); + Http::TestRequestHeaderMapImpl headers; + const auto result = filter_->decodeHeaders(headers, true /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result); } /** - * If there's a per-route configuration and the target cluster does not have the AWS Lambda - * metadata, then we should skip the filter. 
+ * If there's a per-route configuration with an invalid ARN, then we stop. */ -TEST_F(AwsLambdaFilterTest, PerRouteConfigNoClusterMetadata) { - setupFilter({Arn, true /*passthrough*/}); - FilterSettings route_settings{Arn, true /*passthrough*/}; +TEST_F(AwsLambdaFilterTest, PerRouteConfigWithInvalidARN) { + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + FilterSettings route_settings{"BadARN", InvocationMode::Synchronous, true /*passthrough*/}; ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().AwsLambda)) .WillByDefault(Return(&route_settings)); + EXPECT_CALL(decoder_callbacks_, sendLocalReply); Http::TestRequestHeaderMapImpl headers; const auto result = filter_->decodeHeaders(headers, true /*end_stream*/); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result); } /** @@ -81,20 +115,26 @@ TEST_F(AwsLambdaFilterTest, PerRouteConfigWrongClusterMetadata) { )EOF"; ProtobufWkt::Struct cluster_metadata; - envoy::config::core::v3::Metadata metadata; // What should this type be? 
+ envoy::config::core::v3::Metadata metadata; TestUtility::loadFromYaml(metadata_yaml, cluster_metadata); metadata.mutable_filter_metadata()->insert({"WrongMetadataKey", cluster_metadata}); - setupFilter({Arn, true /*passthrough*/}); - FilterSettings route_settings{Arn, true /*passthrough*/}; + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + FilterSettings route_settings{Arn, InvocationMode::Synchronous, true /*passthrough*/}; ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().AwsLambda)) .WillByDefault(Return(&route_settings)); ON_CALL(*decoder_callbacks_.cluster_info_, metadata()).WillByDefault(ReturnRef(metadata)); Http::TestRequestHeaderMapImpl headers; - const auto result = filter_->decodeHeaders(headers, false /*end_stream*/); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); + + const auto decode_header_result = filter_->decodeHeaders(headers, false /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, decode_header_result); + + Buffer::OwnedImpl buf; + const auto decode_data_result = filter_->decodeData(buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, decode_data_result); + EXPECT_EQ(0, buf.length()); } /** @@ -102,29 +142,19 @@ TEST_F(AwsLambdaFilterTest, PerRouteConfigWrongClusterMetadata) { * process the request (i.e. StopIteration if end_stream is false) */ TEST_F(AwsLambdaFilterTest, PerRouteConfigCorrectClusterMetadata) { - const std::string metadata_yaml = R"EOF( - egress_gateway: true - )EOF"; - - ProtobufWkt::Struct cluster_metadata; - envoy::config::core::v3::Metadata metadata; // What should this type be? 
- TestUtility::loadFromYaml(metadata_yaml, cluster_metadata); - metadata.mutable_filter_metadata()->insert({"com.amazonaws.lambda", cluster_metadata}); - - setupFilter({Arn, true /*passthrough*/}); - FilterSettings route_settings{Arn, true /*passthrough*/}; + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + FilterSettings route_settings{Arn, InvocationMode::Synchronous, true /*passthrough*/}; ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().AwsLambda)) .WillByDefault(Return(&route_settings)); - ON_CALL(*decoder_callbacks_.cluster_info_, metadata()).WillByDefault(ReturnRef(metadata)); Http::TestRequestHeaderMapImpl headers; const auto result = filter_->decodeHeaders(headers, false /*end_stream*/); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result); } TEST_F(AwsLambdaFilterTest, DecodeDataShouldBuffer) { - setupFilter({Arn, true /*passthrough*/}); + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); Http::TestRequestHeaderMapImpl headers; const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, header_result); @@ -134,16 +164,443 @@ TEST_F(AwsLambdaFilterTest, DecodeDataShouldBuffer) { } TEST_F(AwsLambdaFilterTest, DecodeDataShouldSign) { - setupFilter({Arn, true /*passthrough*/}); + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); Http::TestRequestHeaderMapImpl headers; const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, header_result); Buffer::OwnedImpl buffer; - ON_CALL(decoder_callbacks_, decodingBuffer()).WillByDefault(Return(&buffer)); + + InSequence seq; + EXPECT_CALL(decoder_callbacks_, addDecodedData(_, false)).Times(1); + EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillOnce(Return(&buffer)); + EXPECT_CALL(*signer_, sign(An(), An())).Times(1); + const auto data_result = 
filter_->decodeData(buffer, true /*end_stream*/); EXPECT_EQ(Http::FilterDataStatus::Continue, data_result); } +TEST_F(AwsLambdaFilterTest, DecodeHeadersInvocationModeSetsHeader) { + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + Http::TestRequestHeaderMapImpl headers; + const auto header_result = filter_->decodeHeaders(headers, true /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, header_result); + + std::string invocation_header_value; + headers.iterate( + [](const Http::HeaderEntry& entry, void* ctx) { + auto* out = static_cast(ctx); + if (entry.key().getStringView() == "x-amz-invocation-type") { + out->append(std::string(entry.value().getStringView())); + return Http::HeaderMap::Iterate::Break; + } + return Http::HeaderMap::Iterate::Continue; + }, + &invocation_header_value); + + EXPECT_EQ("RequestResponse", invocation_header_value); +} + +/** + * A header-only request with pass-through turned off should result in: + * - a request with JSON body. 
+ * - content-length header set appropriately + * - content-type header set to application/json + * - headers with multiple values coalesced with a comma + */ +TEST_F(AwsLambdaFilterTest, DecodeHeadersOnlyRequestWithJsonOn) { + using source::extensions::filters::http::aws_lambda::Request; + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + Buffer::OwnedImpl json_buf; + auto on_add_decoded_data = [&json_buf](Buffer::Instance& buf, bool) { json_buf.move(buf); }; + ON_CALL(decoder_callbacks_, addDecodedData(_, _)).WillByDefault(Invoke(on_add_decoded_data)); + Http::TestRequestHeaderMapImpl headers; + headers.setContentLength(0); + headers.setPath("/resource?proxy=envoy"); + headers.setMethod("GET"); + headers.addCopy("x-custom-header", "unit"); + headers.addCopy("x-custom-header", "test"); + const auto header_result = filter_->decodeHeaders(headers, true /*end_stream*/); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, header_result); + + // Assert it's not empty + ASSERT_GT(json_buf.length(), 0); + + ASSERT_NE(headers.ContentType(), nullptr); + EXPECT_EQ("application/json", headers.ContentType()->value().getStringView()); + + // Assert the true (post-transformation) content-length sent to the Lambda endpoint. + ASSERT_NE(headers.ContentLength(), nullptr); + EXPECT_EQ(fmt::format("{}", json_buf.length()), headers.ContentLength()->value().getStringView()); + + // The best way to verify the generated JSON is to deserialize it and inspect it. + Request req; + TestUtility::loadFromJson(json_buf.toString(), req); + + // Assert the content-length wrapped in JSON reflects the original request's value. 
+ EXPECT_THAT(req.headers(), UnorderedElementsAre(Pair("content-length", "0"), + Pair("x-custom-header", "unit,test"))); + EXPECT_THAT(req.query_string_parameters(), UnorderedElementsAre(Pair("proxy", "envoy"))); + EXPECT_STREQ("/resource?proxy=envoy", req.raw_path().c_str()); + EXPECT_FALSE(req.is_base64_encoded()); + EXPECT_TRUE(req.body().empty()); + EXPECT_STREQ("GET", req.method().c_str()); +} + +/** + * A request with text payload and pass-through turned off should result in: + * - a request with JSON body containing the original payload + * - content-length header set appropriately + * - content-type header set to application/json + * - headers with multiple values coalesced with a comma + */ +TEST_F(AwsLambdaFilterTest, DecodeDataWithTextualBodyWithJsonOn) { + using source::extensions::filters::http::aws_lambda::Request; + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + + Buffer::OwnedImpl decoded_buf; + constexpr absl::string_view expected_plain_text = "Foo bar bazz"; + decoded_buf.add(expected_plain_text); + + auto on_modify_decoding_buffer = [&decoded_buf](std::function cb) { + cb(decoded_buf); + }; + EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillRepeatedly(Return(&decoded_buf)); + EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer) + .WillRepeatedly(Invoke(on_modify_decoding_buffer)); + + std::array textual_mime_types = {"application/json", "application/javascript", + "application/xml", "text/plain"}; + + for (auto mime_type : textual_mime_types) { + Http::TestRequestHeaderMapImpl headers; + headers.setContentLength(expected_plain_text.length()); + headers.setPath("/resource?proxy=envoy"); + headers.setMethod("POST"); + headers.setContentType(mime_type); + headers.addCopy("x-custom-header", "unit"); + headers.addCopy("x-custom-header", "test"); + const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/); + ASSERT_EQ(Http::FilterHeadersStatus::StopIteration, header_result); + + const auto 
data_result = filter_->decodeData(decoded_buf, true /*end_stream*/); + ASSERT_EQ(Http::FilterDataStatus::Continue, data_result); + + // Assert decoded buffer is not drained + ASSERT_GT(decoded_buf.length(), 0); + + ASSERT_NE(headers.ContentType(), nullptr); + EXPECT_EQ("application/json", headers.ContentType()->value().getStringView()); + + // Assert the true (post-transformation) content-length sent to the Lambda endpoint. + ASSERT_NE(headers.ContentLength(), nullptr); + EXPECT_EQ(fmt::format("{}", decoded_buf.length()), + headers.ContentLength()->value().getStringView()); + + // The best way to verify the generated JSON is to deserialize it and inspect it. + Request req; + TestUtility::loadFromJson(decoded_buf.toString(), req); + + // Assert the content-length wrapped in JSON reflects the original request's value. + EXPECT_THAT(req.headers(), + UnorderedElementsAre( + Pair("content-length", fmt::format("{}", expected_plain_text.length())), + Pair("content-type", mime_type), Pair("x-custom-header", "unit,test"))); + EXPECT_THAT(req.query_string_parameters(), UnorderedElementsAre(Pair("proxy", "envoy"))); + EXPECT_STREQ("/resource?proxy=envoy", req.raw_path().c_str()); + EXPECT_STREQ("POST", req.method().c_str()); + EXPECT_FALSE(req.is_base64_encoded()); + ASSERT_FALSE(req.body().empty()); + EXPECT_STREQ(expected_plain_text.data(), req.body().c_str()); + + // reset the buffer for the next iteration + decoded_buf.drain(decoded_buf.length()); + decoded_buf.add(expected_plain_text); + } +} + +/** + * A request with binary payload and pass-through turned off should result in a JSON payload with + * isBase64Encoded flag set. + * binary payload is determined by looking at both transfer-encoding and content-type. 
+ */ +TEST_F(AwsLambdaFilterTest, DecodeDataWithBinaryBodyWithJsonOn) { + using source::extensions::filters::http::aws_lambda::Request; + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + + Buffer::OwnedImpl decoded_buf; + const absl::string_view fake_binary_data = "this should get base64 encoded"; + decoded_buf.add(fake_binary_data); + EXPECT_CALL(decoder_callbacks_, decodingBuffer).WillRepeatedly(Return(&decoded_buf)); + auto on_modify_decoding_buffer = [&decoded_buf](std::function cb) { + cb(decoded_buf); + }; + EXPECT_CALL(decoder_callbacks_, modifyDecodingBuffer) + .WillRepeatedly(Invoke(on_modify_decoding_buffer)); + std::array binary_mime_types = {"", "application/pdf", "gzipped"}; + for (auto mime_type : binary_mime_types) { + Http::TestRequestHeaderMapImpl headers; + headers.setPath("/"); + headers.setMethod("POST"); + headers.setContentLength(fake_binary_data.length()); + if (mime_type == "gzipped") { + headers.setTransferEncoding("gzip"); + } else if (!mime_type.empty()) { + headers.setContentType(mime_type); + } + const auto header_result = filter_->decodeHeaders(headers, false /*end_stream*/); + ASSERT_EQ(Http::FilterHeadersStatus::StopIteration, header_result); + + const auto data_result = filter_->decodeData(decoded_buf, true /*end_stream*/); + ASSERT_EQ(Http::FilterDataStatus::Continue, data_result); + + // The best way to verify the generated JSON is to deserialize it and inspect it. 
+ Request req; + TestUtility::loadFromJson(decoded_buf.toString(), req); + + ASSERT_TRUE(req.is_base64_encoded()); + ASSERT_FALSE(req.body().empty()); + ASSERT_STREQ(req.body().c_str(), "dGhpcyBzaG91bGQgZ2V0IGJhc2U2NCBlbmNvZGVk"); + + // reset the buffer for the next iteration + decoded_buf.drain(decoded_buf.length()); + decoded_buf.add(fake_binary_data); + } +} + +TEST_F(AwsLambdaFilterTest, EncodeHeadersEndStreamShouldSkip) { + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + Http::TestResponseHeaderMapImpl headers; + auto result = filter_->encodeHeaders(headers, true /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); + + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + result = filter_->encodeHeaders(headers, true /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); +} + +/** + * If the Lambda function itself raises an error (syntax, exception, etc.) then we should skip + * encoding headers and skip the filter. + */ +TEST_F(AwsLambdaFilterTest, EncodeHeadersWithLambdaErrorShouldSkipAndContinue) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + headers.addCopy(Http::LowerCaseString("x-Amz-Function-Error"), "unhandled"); + auto result = filter_->encodeHeaders(headers, false /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); +} + +/** + * If Lambda returns a 5xx error then we should skip encoding headers and skip the filter. + */ +TEST_F(AwsLambdaFilterTest, EncodeHeadersWithLambda5xxShouldSkipAndContinue) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(500); + auto result = filter_->encodeHeaders(headers, false /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, result); +} + +/** + * encodeHeaders() in a happy path should stop iteration. 
+ */ +TEST_F(AwsLambdaFilterTest, EncodeHeadersStopsIteration) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + auto result = filter_->encodeHeaders(headers, false /*end_stream*/); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, result); +} + +/** + * encodeData() data in pass-through mode should simply return Continue. + * This is true whether end_stream is true or false. + */ +TEST_F(AwsLambdaFilterTest, EncodeDataInPassThroughMode) { + setupFilter({Arn, InvocationMode::Synchronous, true /*passthrough*/}); + Buffer::OwnedImpl buf; + filter_->resolveSettings(); + auto result = filter_->encodeData(buf, false /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + + result = filter_->encodeData(buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + + setupFilter({Arn, InvocationMode::Asynchronous, true /*passthrough*/}); + filter_->resolveSettings(); + result = filter_->encodeData(buf, false /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + + result = filter_->encodeData(buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); +} + +/** + * encodeData() data in asynchronous mode should simply return Continue. + * This is true whether end_stream is true or false. + */ +TEST_F(AwsLambdaFilterTest, EncodeDataInAsynchrnous) { + setupFilter({Arn, InvocationMode::Asynchronous, false /*passthrough*/}); + Buffer::OwnedImpl buf; + filter_->resolveSettings(); + auto result = filter_->encodeData(buf, false /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + + result = filter_->encodeData(buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); +} + +/** + * encodeData() data in JSON mode should stop iteration if end_stream is false. 
+ */ +TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeStopIterationAndBuffer) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + Buffer::OwnedImpl buf; + filter_->resolveSettings(); + auto result = filter_->encodeData(buf, false /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, result); +} + +/** + * encodeData() data in JSON mode without a 'body' key should translate the 'headers' key to HTTP + * headers while ignoring any HTTP/2 pseudo-headers. + */ +TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + filter_->resolveSettings(); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + filter_->encodeHeaders(headers, false /*end_stream*/); + + constexpr auto json_response = R"EOF( + { + "statusCode": 201, + "headers": { + "x-awesome-header": "awesome value", + ":other": "should_never_make_it" + }, + "cookies": ["session-id=42; Secure; HttpOnly", "user=joe"] + } + )EOF"; + + Buffer::OwnedImpl encoded_buf; + encoded_buf.add(json_response); + auto on_modify_encoding_buffer = [&encoded_buf](std::function cb) { + cb(encoded_buf); + }; + EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&encoded_buf)); + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer) + .WillRepeatedly(Invoke(on_modify_encoding_buffer)); + + auto result = filter_->encodeData(encoded_buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + + ASSERT_NE(nullptr, headers.Status()); + EXPECT_EQ("201", headers.Status()->value().getStringView()); + + EXPECT_EQ(nullptr, headers.get(Http::LowerCaseString(":other"))); + + const auto* custom_header = headers.get(Http::LowerCaseString("x-awesome-header")); + EXPECT_NE(custom_header, nullptr); + EXPECT_EQ("awesome value", custom_header->value().getStringView()); + + std::vector cookies; + headers.iterate( + [](const Http::HeaderEntry& entry, void* ctx) { + auto* 
list = static_cast*>(ctx); + if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { + list->emplace_back(entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }, + &cookies); + + EXPECT_THAT(cookies, ElementsAre("session-id=42; Secure; HttpOnly", "user=joe")); +} + +/** + * encodeData() in JSON mode with a non-empty body should translate the body to plain text if it was + * base64-encoded. + */ +TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeBase64EncodedBody) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + filter_->resolveSettings(); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + filter_->encodeHeaders(headers, false /*end_stream*/); + + constexpr auto json_base64_body = R"EOF( + { + "statusCode": 201, + "body": "Q29mZmVl", + "isBase64Encoded": true + } + )EOF"; + + constexpr auto json_plain_text_body = R"EOF( + { + "statusCode": 201, + "body": "Beans", + "isBase64Encoded": false + } + )EOF"; + + Buffer::OwnedImpl encoded_buf; + encoded_buf.add(json_base64_body); + auto on_modify_encoding_buffer = [&encoded_buf](std::function cb) { + cb(encoded_buf); + }; + EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&encoded_buf)); + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer) + .WillRepeatedly(Invoke(on_modify_encoding_buffer)); + + auto result = filter_->encodeData(encoded_buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + EXPECT_STREQ("Coffee", encoded_buf.toString().c_str()); + + encoded_buf.drain(encoded_buf.length()); + + encoded_buf.add(json_plain_text_body); + result = filter_->encodeData(encoded_buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + EXPECT_STREQ("Beans", encoded_buf.toString().c_str()); +} + +/** + * Encode data in JSON mode _returning_ invalid JSON payload should result in a 500 error. 
+ */ +TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeInvalidJson) { + setupFilter({Arn, InvocationMode::Synchronous, false /*passthrough*/}); + filter_->resolveSettings(); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + filter_->encodeHeaders(headers, false /*end_stream*/); + + constexpr auto json_response = R"EOF( + + Does XML work?? + + )EOF"; + + Buffer::OwnedImpl encoded_buf; + encoded_buf.add(json_response); + auto on_modify_encoding_buffer = [&encoded_buf](std::function cb) { + cb(encoded_buf); + }; + EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&encoded_buf)); + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer) + .WillRepeatedly(Invoke(on_modify_encoding_buffer)); + + auto result = filter_->encodeData(encoded_buf, true /*end_stream*/); + EXPECT_EQ(Http::FilterDataStatus::Continue, result); + EXPECT_EQ(0, encoded_buf.length()); + + ASSERT_NE(nullptr, headers.Status()); + EXPECT_EQ("500", headers.Status()->value().getStringView()); +} + } // namespace } // namespace AwsLambdaFilter } // namespace HttpFilters diff --git a/test/extensions/filters/http/aws_lambda/config_test.cc b/test/extensions/filters/http/aws_lambda/config_test.cc index 9b8eb6098332..b0119d759900 100644 --- a/test/extensions/filters/http/aws_lambda/config_test.cc +++ b/test/extensions/filters/http/aws_lambda/config_test.cc @@ -1,6 +1,7 @@ #include "envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.h" #include "envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.pb.validate.h" +#include "extensions/filters/http/aws_lambda/aws_lambda_filter.h" #include "extensions/filters/http/aws_lambda/config.h" #include "test/mocks/server/mocks.h" @@ -9,6 +10,8 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using ::testing::Truly; + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -22,6 +25,35 @@ TEST(AwsLambdaFilterConfigTest, ValidConfigCreatesFilter) { const std::string yaml = R"EOF( arn: 
"arn:aws:lambda:region:424242:function:fun" payload_passthrough: true +invocation_mode: asynchronous + )EOF"; + + LambdaConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + AwsLambdaFilterFactory factory; + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callbacks; + auto has_expected_settings = [](std::shared_ptr stream_filter) { + auto filter = std::static_pointer_cast(stream_filter); + const auto settings = filter->settingsForTest(); + return settings.payloadPassthrough() && + settings.invocationMode() == InvocationMode::Asynchronous; + }; + + EXPECT_CALL(filter_callbacks, addStreamFilter(Truly(has_expected_settings))); + cb(filter_callbacks); +} + +/** + * The default for passthrough is false. + * The default for invocation_mode is Synchronous. + */ +TEST(AwsLambdaFilterConfigTest, ValidConfigVerifyDefaults) { + const std::string yaml = R"EOF( +arn: "arn:aws:lambda:region:424242:function:fun" )EOF"; LambdaConfig proto_config; @@ -32,7 +64,14 @@ payload_passthrough: true Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context); Http::MockFilterChainFactoryCallbacks filter_callbacks; - EXPECT_CALL(filter_callbacks, addStreamFilter(_)); + auto has_expected_settings = [](std::shared_ptr stream_filter) { + auto filter = std::static_pointer_cast(stream_filter); + const auto settings = filter->settingsForTest(); + return settings.payloadPassthrough() == false && + settings.invocationMode() == InvocationMode::Synchronous; + }; + + EXPECT_CALL(filter_callbacks, addStreamFilter(Truly(has_expected_settings))); cb(filter_callbacks); } @@ -52,7 +91,35 @@ TEST(AwsLambdaFilterConfigTest, ValidPerRouteConfigCreatesFilter) { auto route_specific_config_ptr = factory.createRouteSpecificFilterConfig( proto_config, context, ProtobufMessage::getStrictValidationVisitor()); 
Http::MockFilterChainFactoryCallbacks filter_callbacks; - EXPECT_NE(route_specific_config_ptr, nullptr); + ASSERT_NE(route_specific_config_ptr, nullptr); + auto filter_settings_ptr = + std::static_pointer_cast(route_specific_config_ptr); + EXPECT_TRUE(filter_settings_ptr->payloadPassthrough()); + EXPECT_EQ(InvocationMode::Synchronous, filter_settings_ptr->invocationMode()); +} + +TEST(AwsLambdaFilterConfigTest, AsynchrnousPerRouteConfig) { + const std::string yaml = R"EOF( + invoke_config: + arn: "arn:aws:lambda:region:424242:function:fun" + payload_passthrough: false + invocation_mode: asynchronous + )EOF"; + + LambdaPerRouteConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + AwsLambdaFilterFactory factory; + + auto route_specific_config_ptr = factory.createRouteSpecificFilterConfig( + proto_config, context, ProtobufMessage::getStrictValidationVisitor()); + Http::MockFilterChainFactoryCallbacks filter_callbacks; + ASSERT_NE(route_specific_config_ptr, nullptr); + auto filter_settings_ptr = + std::static_pointer_cast(route_specific_config_ptr); + EXPECT_FALSE(filter_settings_ptr->payloadPassthrough()); + EXPECT_EQ(InvocationMode::Asynchronous, filter_settings_ptr->invocationMode()); } } // namespace diff --git a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc index 4d6c551240b3..dfde16d77699 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc @@ -15,32 +15,32 @@ INSTANTIATE_TEST_SUITE_P(Protocols, BufferIntegrationTest, HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(BufferIntegrationTest, RouterNotFoundBodyBuffer) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); testRouterNotFoundWithBody(); } 
TEST_P(BufferIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); testRouterRequestAndResponseWithBody(4 * 1024 * 1024, 4 * 1024 * 1024, false); } TEST_P(BufferIntegrationTest, RouterHeaderOnlyRequestAndResponseBuffer) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); testRouterHeaderOnlyRequestAndResponse(); } TEST_P(BufferIntegrationTest, RouterRequestAndResponseWithBodyBuffer) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); testRouterRequestAndResponseWithBody(1024, 512, false); } TEST_P(BufferIntegrationTest, RouterRequestAndResponseWithZeroByteBodyBuffer) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); testRouterRequestAndResponseWithBody(0, 0, false); } TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLength) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -67,7 +67,7 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLength) { } TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -96,7 +96,7 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { } TEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) { - config_helper_.addFilter(ConfigHelper::SMALL_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::smallBufferFilter()); initialize(); 
codec_client_ = makeHttpConnection(lookupPort("http")); @@ -135,7 +135,7 @@ ConfigHelper::HttpModifierFunction overrideConfig(const std::string& json_config TEST_P(BufferIntegrationTest, RouteDisabled) { ConfigHelper::HttpModifierFunction mod = overrideConfig(R"EOF({"disabled": true})EOF"); config_helper_.addConfigModifier(mod); - config_helper_.addFilter(ConfigHelper::SMALL_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::smallBufferFilter()); config_helper_.setBufferLimits(1024, 1024); initialize(); @@ -162,7 +162,7 @@ TEST_P(BufferIntegrationTest, RouteOverride) { "max_request_bytes": 5242880 }})EOF"); config_helper_.addConfigModifier(mod); - config_helper_.addFilter(ConfigHelper::SMALL_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::smallBufferFilter()); initialize(); diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc index 6f4379299330..a82c86444d0a 100644 --- a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc +++ b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" #include "common/protobuf/utility.h" @@ -90,7 +92,8 @@ class CompressorFilterTest : public testing::Test { void setUpFilter(std::string&& json) { envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson(json, compressor); - config_.reset(new MockCompressorFilterConfig(compressor, "test.", stats_, runtime_, "test")); + config_ = + std::make_shared(compressor, "test.", stats_, runtime_, "test"); filter_ = std::make_unique(config_); filter_->setEncoderFilterCallbacks(encoder_callbacks_); } @@ -159,7 +162,7 @@ class CompressorFilterTest : public testing::Test { std::unique_ptr filter_; Buffer::OwnedImpl data_; std::string expected_str_; - Stats::IsolatedStoreImpl stats_; + 
Stats::TestUtil::TestStore stats_; NiceMock runtime_; NiceMock encoder_callbacks_; }; @@ -174,7 +177,9 @@ TEST_F(CompressorFilterTest, DecodeHeadersWithRuntimeDisabled) { } } )EOF"); - EXPECT_CALL(runtime_.snapshot_, getBoolean("foo_key", true)).WillOnce(Return(false)); + EXPECT_CALL(runtime_.snapshot_, getBoolean("foo_key", true)) + .Times(2) + .WillRepeatedly(Return(false)); doRequest({{":method", "get"}, {"accept-encoding", "deflate, test"}}, false); doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); } @@ -343,12 +348,13 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { { // Compressor "test2" from an independent filter chain should not overshadow "test". // The independence is simulated with a new instance DecoderFilterCallbacks set for "test2". - Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson("{}", compressor); CompressorFilterConfigSharedPtr config2; - config2.reset(new MockCompressorFilterConfig(compressor, "test2.", stats, runtime, "test2")); + config2 = + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -365,12 +371,14 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { } { // check if the legacy "header_gzip" counter is incremented for gzip compression filter - Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; + ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson("{}", compressor); CompressorFilterConfigSharedPtr config2; - config2.reset(new MockCompressorFilterConfig(compressor, "test2.", stats, runtime, "gzip")); + config2 = + std::make_shared(compressor, "test2.", stats, runtime, "gzip"); std::unique_ptr gzip_filter = 
std::make_unique(config2); NiceMock decoder_callbacks; gzip_filter->setDecoderFilterCallbacks(decoder_callbacks); @@ -383,12 +391,14 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { } { // check if identity stat is increased twice (the second time via the cached path). - Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; + ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson("{}", compressor); CompressorFilterConfigSharedPtr config2; - config2.reset(new MockCompressorFilterConfig(compressor, "test2.", stats, runtime, "test")); + config2 = + std::make_shared(compressor, "test2.", stats, runtime, "test"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -401,12 +411,14 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { } { // check if not_valid stat is increased twice (the second time via the cached path). - Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; + ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson("{}", compressor); CompressorFilterConfigSharedPtr config2; - config2.reset(new MockCompressorFilterConfig(compressor, "test2.", stats, runtime, "test")); + config2 = + std::make_shared(compressor, "test2.", stats, runtime, "test"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -419,15 +431,18 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { } { // Test that encoding decision is cached when used by multiple filters. 
- Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; + ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson("{}", compressor); CompressorFilterConfigSharedPtr config1; - config1.reset(new MockCompressorFilterConfig(compressor, "test1.", stats, runtime, "test1")); + config1 = + std::make_shared(compressor, "test1.", stats, runtime, "test1"); std::unique_ptr filter1 = std::make_unique(config1); CompressorFilterConfigSharedPtr config2; - config2.reset(new MockCompressorFilterConfig(compressor, "test2.", stats, runtime, "test2")); + config2 = + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter1->setDecoderFilterCallbacks(decoder_callbacks); @@ -446,15 +461,18 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { } { // Test that first registered filter is used when handling wildcard. - Stats::IsolatedStoreImpl stats; + Stats::TestUtil::TestStore stats; + ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson("{}", compressor); CompressorFilterConfigSharedPtr config1; - config1.reset(new MockCompressorFilterConfig(compressor, "test1.", stats, runtime, "test1")); + config1 = + std::make_shared(compressor, "test1.", stats, runtime, "test1"); std::unique_ptr filter1 = std::make_unique(config1); CompressorFilterConfigSharedPtr config2; - config2.reset(new MockCompressorFilterConfig(compressor, "test2.", stats, runtime, "test2")); + config2 = + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter1->setDecoderFilterCallbacks(decoder_callbacks); diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5107908850483200.fuzz 
b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5107908850483200.fuzz new file mode 100644 index 000000000000..e168fb0e6faf --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5107908850483200.fuzz @@ -0,0 +1,9 @@ +config { + name: "envoy.filters.http.header_to_metadata" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config" + value: "\n\036\n\001=\012\014\n\002Ae\022\001]\032\001{(\001\032\t\n\001]\022\001]\032\001{ \001\n&\n\001=\032\037\n\027envoy.filters.http.rbac\022\001]\032\001{ \001\n\020\n\001=\032\t\n\001]\022\001]\032\001{ \001" + } +} +data { +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5969746626609152 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5969746626609152 new file mode 100644 index 000000000000..88707018cb14 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5969746626609152 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.dynamic_forward_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig" + value: "\n\t\n\002Ae\032\003\020\200N" + } +} diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 260e4476cb01..2f97166f60a7 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -75,7 +75,7 @@ class UberFilterFuzzer { headers.setHost("foo.com"); } - if (data.data().size() == 0 && !data.has_trailers()) { + if (data.data().empty() && !data.has_trailers()) { end_stream = true; } ENVOY_LOG_MISC(debug, "Decoding headers: 
{} ", data.headers().DebugString()); @@ -126,8 +126,8 @@ class UberFilterFuzzer { } void reset() { - if (filter_.get() != nullptr) { - filter_.get()->onDestroy(); + if (filter_ != nullptr) { + filter_->onDestroy(); } filter_.reset(); } diff --git a/test/extensions/filters/http/common/mock.cc b/test/extensions/filters/http/common/mock.cc index 8244cbd51b1d..45129c0edc5c 100644 --- a/test/extensions/filters/http/common/mock.cc +++ b/test/extensions/filters/http/common/mock.cc @@ -21,7 +21,7 @@ MockUpstream::MockUpstream(Upstream::MockClusterManager& mock_cm, const std::str } else { response_message->body().reset(nullptr); } - cb.onSuccess(std::move(response_message)); + cb.onSuccess(request_, std::move(response_message)); return &request_; })); } @@ -33,7 +33,7 @@ MockUpstream::MockUpstream(Upstream::MockClusterManager& mock_cm, .WillByDefault(testing::Invoke( [this, reason](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - cb.onFailure(reason); + cb.onFailure(request_, reason); return &request_; })); } diff --git a/test/extensions/filters/http/cors/cors_filter_test.cc b/test/extensions/filters/http/cors/cors_filter_test.cc index 407361159a69..b045ed78987d 100644 --- a/test/extensions/filters/http/cors/cors_filter_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_test.cc @@ -64,7 +64,7 @@ class CorsFilterTest : public testing::Test { NiceMock decoder_callbacks_; NiceMock encoder_callbacks_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; CorsFilterConfigSharedPtr config_; CorsFilter filter_; Buffer::OwnedImpl data_; diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index bd7b392389d5..957ed37467e7 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -179,7 +179,7 @@ TEST_F(HttpFilterTest, 
ErrorFailClose) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -196,7 +196,9 @@ TEST_F(HttpFilterTest, ErrorFailClose) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); EXPECT_EQ(1U, config_->stats().error_.value()); } @@ -216,7 +218,7 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -233,7 +235,9 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); EXPECT_EQ(1U, config_->stats().error_.value()); EXPECT_EQ("ext_authz_error", 
filter_callbacks_.details_); } @@ -253,7 +257,7 @@ TEST_F(HttpFilterTest, ErrorOpen) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -265,7 +269,9 @@ TEST_F(HttpFilterTest, ErrorOpen) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); EXPECT_EQ(1U, config_->stats().error_.value()); } @@ -287,7 +293,7 @@ TEST_F(HttpFilterTest, ImmediateErrorOpen) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::make_unique(response)); @@ -297,10 +303,12 @@ TEST_F(HttpFilterTest, ImmediateErrorOpen) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.error").value()); EXPECT_EQ(1U, 
filter_callbacks_.clusterInfo() ->statsScope() - .counter("ext_authz.failure_mode_allowed") + .counterFromString("ext_authz.failure_mode_allowed") .value()); EXPECT_EQ(1U, config_->stats().error_.value()); EXPECT_EQ(1U, config_->stats().failure_mode_allowed_.value()); @@ -338,7 +346,7 @@ TEST_F(HttpFilterTest, RequestDataIsTooLarge) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1); EXPECT_CALL(connection_, remoteAddress()).Times(0); EXPECT_CALL(connection_, localAddress()).Times(0); - EXPECT_CALL(*client_, check(_, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); @@ -370,7 +378,7 @@ TEST_F(HttpFilterTest, RequestDataWithPartialMessage) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(1); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); @@ -410,7 +418,7 @@ TEST_F(HttpFilterTest, RequestDataWithSmallBuffer) { EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(1); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); @@ -434,7 +442,7 @@ TEST_F(HttpFilterTest, AuthWithRequestData) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = 
&callbacks; @@ -462,7 +470,7 @@ TEST_F(HttpFilterTest, HeaderOnlyRequest) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -491,7 +499,7 @@ TEST_F(HttpFilterTest, UpgradeWebsocketRequest) { request_headers_.addCopy(Http::Headers::get().Upgrade, Http::Headers::get().UpgradeValues.WebSocket); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -519,7 +527,7 @@ TEST_F(HttpFilterTest, H2UpgradeRequest) { request_headers_.addCopy(Http::Headers::get().Protocol, Http::Headers::get().ProtocolStrings.Http2String); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -544,7 +552,7 @@ TEST_F(HttpFilterTest, HeaderOnlyRequestWithStream) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -572,7 +580,7 @@ TEST_F(HttpFilterTest, ClearCache) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -592,7 +600,8 @@ TEST_F(HttpFilterTest, ClearCache) { response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; response.headers_to_add = 
Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); } @@ -612,7 +621,7 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -631,7 +640,8 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAppendOnly) { response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); } @@ -651,7 +661,7 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -670,7 +680,8 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, 
filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); } @@ -689,7 +700,7 @@ TEST_F(HttpFilterTest, NoClearCacheRoute) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -707,7 +718,8 @@ TEST_F(HttpFilterTest, NoClearCacheRoute) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); } @@ -723,7 +735,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteConfig) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -743,7 +755,8 @@ TEST_F(HttpFilterTest, NoClearCacheRouteConfig) { response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); } @@ -766,7 +779,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; auto 
response_ptr = std::make_unique(response); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::move(response_ptr)); @@ -778,7 +791,9 @@ TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); EXPECT_EQ(1U, config_->stats().denied_.value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ("ext_authz_denied", filter_callbacks_.details_); } @@ -814,7 +829,7 @@ TEST_F(HttpFilterTest, MetadataContext) { prepareCheck(); envoy::service::auth::v3::CheckRequest check_request; - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce(WithArgs<1>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) -> void { check_request = check_param; }))); @@ -863,7 +878,7 @@ TEST_F(HttpFilterTest, FilterDisabled) { .WillByDefault(Return(false)); // Make sure check is not called. - EXPECT_CALL(*client_, check(_, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); // Engage the filter. EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } @@ -889,7 +904,7 @@ TEST_F(HttpFilterTest, FilterEnabled) { .WillByDefault(Return(true)); // Make sure check is called once. - EXPECT_CALL(*client_, check(_, _, _)).Times(1); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(1); // Engage the filter. 
EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); @@ -927,7 +942,7 @@ TEST_F(HttpFilterTestParam, ContextExtensions) { // Save the check request from the check call. envoy::service::auth::v3::CheckRequest check_request; - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce(WithArgs<1>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) -> void { check_request = check_param; }))); @@ -961,7 +976,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRoute) { // baseline: make sure that when not disabled, check is called test_disable(false); - EXPECT_CALL(*client_, check(_, _, testing::A())).Times(1); + EXPECT_CALL(*client_, check(_, _, testing::A(), _)).Times(1); // Engage the filter. EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); @@ -969,11 +984,57 @@ TEST_F(HttpFilterTestParam, DisabledOnRoute) { // test that disabling works test_disable(true); // Make sure check is not called. - EXPECT_CALL(*client_, check(_, _, _)).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); // Engage the filter. EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } +// Test that filter can be disabled with route config. +TEST_F(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { + envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; + FilterConfigPerRoute auth_per_route(settings); + + ON_CALL(*filter_callbacks_.route_, perFilterConfig(HttpFilterNames::get().ExtAuthorization)) + .WillByDefault(Return(&auth_per_route)); + + auto test_disable = [&](bool disabled) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 1 + allow_partial_message: false + )EOF"); + + // Set the filter disabled setting. 
+ settings.set_disabled(disabled); + // Initialize the route's per filter config. + auth_per_route = FilterConfigPerRoute(settings); + }; + + test_disable(false); + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + // When filter is not disabled, setDecoderBufferLimit is called. + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(1); + EXPECT_CALL(connection_, remoteAddress()).Times(0); + EXPECT_CALL(connection_, localAddress()).Times(0); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + // To test that disabling the filter works. + test_disable(true); + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + // Make sure that setDecoderBufferLimit is skipped. + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); +} + // Test that the request continues when the filter_callbacks has no route. 
TEST_F(HttpFilterTestParam, NoRoute) { EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); @@ -988,7 +1049,7 @@ TEST_F(HttpFilterTestParam, OkResponse) { prepareCheck(); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1004,7 +1065,8 @@ TEST_F(HttpFilterTestParam, OkResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); // decodeData() and decodeTrailers() are called after continueDecoding(). EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -1021,7 +1083,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::make_unique(response)); @@ -1030,7 +1092,8 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponse) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.ok").value()); + EXPECT_EQ( + 1U, 
filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); EXPECT_EQ(1U, config_->stats().ok_.value()); } @@ -1049,7 +1112,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { auto response_ptr = std::make_unique(response); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::move(response_ptr)); @@ -1057,7 +1120,9 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ(1U, config_->stats().denied_.value()); // When request is denied, no call to continueDecoding(). As a result, decodeData() and // decodeTrailer() will not be called. 
@@ -1088,7 +1153,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { auto response_ptr = std::make_unique(response); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::move(response_ptr)); @@ -1112,7 +1177,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(std::make_unique(response)); @@ -1120,7 +1185,9 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ(1U, config_->stats().denied_.value()); // When request is denied, no call to continueDecoding(). As a result, decodeData() and // decodeTrailer() will not be called. 
@@ -1131,7 +1198,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith401) { InSequence s; prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1149,9 +1216,13 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith401) { response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Unauthorized; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ(1U, config_->stats().denied_.value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_4xx").value()); } // Test that a denied response results in the connection closing with a 403 response to the client. 
@@ -1159,7 +1230,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith403) { InSequence s; prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1177,10 +1248,16 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith403) { response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Forbidden; request_callbacks_->onComplete(std::make_unique(response)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ(1U, config_->stats().denied_.value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_403").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_4xx").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_403").value()); } // Verify that authz response memory is not used after free. 
@@ -1197,7 +1274,7 @@ TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { std::make_unique(response); prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1223,10 +1300,16 @@ TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { })); request_callbacks_->onComplete(std::move(response_ptr)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ(1U, config_->stats().denied_.value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_403").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_4xx").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_403").value()); } // Verify that authz denied response headers overrides the existing encoding headers, @@ -1249,7 +1332,7 @@ TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { std::make_unique(response); prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1288,10 +1371,16 @@ TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { })); request_callbacks_->onComplete(std::move(response_ptr)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ext_authz.denied").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.denied").value()); EXPECT_EQ(1U, 
config_->stats().denied_.value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_403").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_4xx").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_403").value()); } // Test that when a connection awaiting a authorization response is canceled then the @@ -1300,7 +1389,7 @@ TEST_F(HttpFilterTestParam, ResetDuringCall) { InSequence s; prepareCheck(); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -1332,7 +1421,7 @@ TEST_F(HttpFilterTestParam, NoCluster) { // Save the check request from the check call. envoy::service::auth::v3::CheckRequest check_request; - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce(WithArgs<1>(Invoke([&](const envoy::service::auth::v3::CheckRequest& check_param) -> void { check_request = check_param; }))); // Make sure that filter chain is not continued and the call has been invoked. 
diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index eb83202d83c8..0de6912992aa 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -128,7 +128,7 @@ class FaultFilterTest : public testing::Test { const std::string v2_empty_fault_config_yaml = "{}"; void SetUpTest(const envoy::extensions::filters::http::fault::v3::HTTPFault fault) { - config_.reset(new FaultFilterConfig(fault, runtime_, "prefix.", stats_, time_system_)); + config_ = std::make_shared(fault, runtime_, "prefix.", stats_, time_system_); filter_ = std::make_unique(config_); filter_->setDecoderFilterCallbacks(decoder_filter_callbacks_); filter_->setEncoderFilterCallbacks(encoder_filter_callbacks_); diff --git a/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc b/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc index 178418616833..fb2188af990a 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc +++ b/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc @@ -89,11 +89,11 @@ TEST_F(GrpcHttp1BridgeFilterTest, StatsHttp2HeaderOnlyResponse) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true)); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.failure") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.failure") .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.total") .value()); } @@ -114,11 +114,11 @@ TEST_F(GrpcHttp1BridgeFilterTest, StatsHttp2NormalResponse) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers)); 
EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.success") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.success") .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.total") .value()); } @@ -137,11 +137,11 @@ TEST_F(GrpcHttp1BridgeFilterTest, StatsHttp2ContentTypeGrpcPlusProto) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers)); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.success") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.success") .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.total") .value()); } diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 4b6cd69535df..f396b6704584 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -12,7 +12,6 @@ #include "gtest/gtest.h" using Envoy::Protobuf::TextFormat; -using Envoy::Protobuf::util::MessageDifferencer; using Envoy::ProtobufUtil::Status; using Envoy::ProtobufUtil::error::Code; using Envoy::ProtobufWkt::Empty; diff --git a/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc b/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc index f89d0f66936f..1446c4d8874d 100644 --- 
a/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc @@ -16,7 +16,7 @@ namespace { class HttpBodyUtilsTest : public testing::Test { public: - HttpBodyUtilsTest() {} + HttpBodyUtilsTest() = default; template void basicTest(const std::string& content, const std::string& content_type, diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 613b9acaa7a2..74513d9c0c7d 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -25,8 +25,6 @@ using testing::_; using testing::Invoke; using testing::NiceMock; -using Envoy::Protobuf::MethodDescriptor; - using Envoy::Protobuf::FileDescriptorProto; using Envoy::Protobuf::FileDescriptorSet; using Envoy::Protobuf::util::MessageDifferencer; diff --git a/test/extensions/filters/http/grpc_stats/BUILD b/test/extensions/filters/http/grpc_stats/BUILD index db945cb74a1c..15dd7ab9aeff 100644 --- a/test/extensions/filters/http/grpc_stats/BUILD +++ b/test/extensions/filters/http/grpc_stats/BUILD @@ -21,6 +21,7 @@ envoy_extension_cc_test( "//test/common/buffer:utility_lib", "//test/common/stream_info:test_util", "//test/mocks/server:server_mocks", + "//test/test_common:logging_lib", "@envoy_api//envoy/extensions/filters/http/grpc_stats/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/grpc_stats/config_test.cc b/test/extensions/filters/http/grpc_stats/config_test.cc index f170badb49ba..c2f753edf088 100644 --- a/test/extensions/filters/http/grpc_stats/config_test.cc +++ b/test/extensions/filters/http/grpc_stats/config_test.cc @@ -8,11 +8,13 @@ #include "test/common/buffer/utility.h" #include "test/common/stream_info/test_util.h" #include "test/mocks/server/mocks.h" +#include 
"test/test_common/logging.h" #include "gmock/gmock.h" #include "gtest/gtest.h" using testing::_; +using testing::Return; namespace Envoy { namespace Extensions { @@ -22,11 +24,9 @@ namespace { class GrpcStatsFilterConfigTest : public testing::Test { protected: - void initialize(bool emit_filter_state) { - envoy::extensions::filters::http::grpc_stats::v3::FilterConfig config{}; - config.set_emit_filter_state(emit_filter_state); - GrpcStatsFilterConfig factory; - Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, "stats", context_); + void initialize() { + GrpcStatsFilterConfigFactory factory; + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config_, "stats", context_); Http::MockFilterChainFactoryCallbacks filter_callback; ON_CALL(filter_callback, addStreamFilter(_)).WillByDefault(testing::SaveArg<0>(&filter_)); @@ -37,6 +37,27 @@ class GrpcStatsFilterConfigTest : public testing::Test { filter_->setDecoderFilterCallbacks(decoder_callbacks_); } + void addAllowlistEntry() { + auto* allowlist = config_.mutable_individual_method_stats_allowlist(); + auto* services = allowlist->mutable_services(); + auto* service = services->Add(); + service->set_name("BadCompanions"); + *service->mutable_method_names()->Add() = "GetBadCompanions"; + *service->mutable_method_names()->Add() = "AnotherMethod"; + } + + void doRequestResponse(Http::TestRequestHeaderMapImpl& request_headers) { + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false)); + Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}}; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); + } + + 
envoy::extensions::filters::http::grpc_stats::v3::FilterConfig config_; NiceMock context_; std::shared_ptr filter_; NiceMock decoder_callbacks_; @@ -44,7 +65,8 @@ class GrpcStatsFilterConfigTest : public testing::Test { }; TEST_F(GrpcStatsFilterConfigTest, StatsHttp2HeaderOnlyResponse) { - initialize(false); + config_.mutable_stats_for_all_methods()->set_value(true); + initialize(); Http::TestRequestHeaderMapImpl request_headers{ {"content-type", "application/grpc"}, {":path", "/lyft.users.BadCompanions/GetBadCompanions"}}; @@ -61,65 +83,256 @@ TEST_F(GrpcStatsFilterConfigTest, StatsHttp2HeaderOnlyResponse) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.failure") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.failure") .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.total") .value()); EXPECT_FALSE(stream_info_.filterState()->hasDataWithName(HttpFilterNames::get().GrpcStats)); } TEST_F(GrpcStatsFilterConfigTest, StatsHttp2NormalResponse) { - initialize(false); + config_.mutable_stats_for_all_methods()->set_value(true); + initialize(); Http::TestRequestHeaderMapImpl request_headers{ {"content-type", "application/grpc"}, {":path", "/lyft.users.BadCompanions/GetBadCompanions"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + doRequestResponse(request_headers); - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); - Buffer::OwnedImpl data("hello"); - EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false)); - 
Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}}; - EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.success") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.success") .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.total") .value()); EXPECT_FALSE(stream_info_.filterState()->hasDataWithName(HttpFilterNames::get().GrpcStats)); } TEST_F(GrpcStatsFilterConfigTest, StatsHttp2ContentTypeGrpcPlusProto) { - initialize(false); + config_.mutable_stats_for_all_methods()->set_value(true); + initialize(); Http::TestRequestHeaderMapImpl request_headers{ {"content-type", "application/grpc+proto"}, {":path", "/lyft.users.BadCompanions/GetBadCompanions"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + doRequestResponse(request_headers); - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); - Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}}; - EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.success") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.success") .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc.lyft.users.BadCompanions.GetBadCompanions.total") .value()); 
EXPECT_FALSE(stream_info_.filterState()->hasDataWithName(HttpFilterNames::get().GrpcStats)); } +// Test that an allowlist match results in method-named stats. +TEST_F(GrpcStatsFilterConfigTest, StatsAllowlistMatch) { + addAllowlistEntry(); + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/BadCompanions/GetBadCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.success") + .value()); + EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.total") + .value()); +} + +// Test that an allowlist method mismatch results in going to the generic stat. +TEST_F(GrpcStatsFilterConfigTest, StatsAllowlistMismatchMethod) { + addAllowlistEntry(); + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/BadCompanions/GetGoodCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetGoodCompanions.success") + .value()); + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetGoodCompanions.total") + .value()); + EXPECT_EQ( + 1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.success").value()); + EXPECT_EQ(1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.total").value()); +} + +// Test that an allowlist service mismatch results in going to the generic stat. 
+TEST_F(GrpcStatsFilterConfigTest, StatsAllowlistMismatchService) { + addAllowlistEntry(); + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/GoodCompanions/GetBadCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.GoodCompanions.GetBadCompanions.success") + .value()); + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.GoodCompanions.GetBadCompanions.total") + .value()); + EXPECT_EQ( + 1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.success").value()); + EXPECT_EQ(1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.total").value()); +} + +// Test that any method results in going to the generic stat, when stats_for_all_methods == false. +TEST_F(GrpcStatsFilterConfigTest, DisableStatsForAllMethods) { + config_.mutable_stats_for_all_methods()->set_value(false); + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/BadCompanions/GetBadCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.success") + .value()); + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.total") + .value()); + EXPECT_EQ( + 1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.success").value()); + EXPECT_EQ(1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.total").value()); +} + +// Test that any method results in a specific stat, when stats_for_all_methods isn't set +// at all. +// +// This is deprecated behavior and will be changed during the deprecation window. 
+TEST_F(GrpcStatsFilterConfigTest, StatsForAllMethodsDefaultSetting) { + EXPECT_CALL( + context_.runtime_loader_.snapshot_, + deprecatedFeatureEnabled( + "envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default", _)) + .WillOnce(Invoke([](absl::string_view, bool default_value) { return default_value; })); + EXPECT_LOG_CONTAINS("warn", + "Using deprecated default value for " + "'envoy.extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_" + "methods'", + initialize()); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/BadCompanions/GetBadCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.success") + .value()); + EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.total") + .value()); + EXPECT_EQ( + 0UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.success").value()); + EXPECT_EQ(0UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.total").value()); +} + +// Test that any method results in a specific stat, when stats_for_all_methods isn't set +// at all. +// +// This is deprecated behavior and will be changed during the deprecation window. 
+TEST_F(GrpcStatsFilterConfigTest, StatsForAllMethodsDefaultSettingRuntimeOverrideTrue) { + EXPECT_CALL( + context_.runtime_loader_.snapshot_, + deprecatedFeatureEnabled( + "envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default", _)) + .WillOnce(Return(true)); + EXPECT_LOG_CONTAINS("warn", + "Using deprecated default value for " + "'envoy.extensions.filters.http.grpc_stats.v3.FilterConfig.stats_for_all_" + "methods'", + initialize()); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/BadCompanions/GetBadCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.success") + .value()); + EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.total") + .value()); + EXPECT_EQ( + 0UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.success").value()); + EXPECT_EQ(0UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.total").value()); +} + +// Test that the runtime override for the deprecated previous default behavior works. 
+TEST_F(GrpcStatsFilterConfigTest, StatsForAllMethodsDefaultSettingRuntimeOverrideFalse) { + EXPECT_CALL( + context_.runtime_loader_.snapshot_, + deprecatedFeatureEnabled( + "envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default", _)) + .WillOnce(Return(false)); + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"}, + {":path", "/BadCompanions/GetBadCompanions"}}; + + doRequestResponse(request_headers); + + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.success") + .value()); + EXPECT_EQ(0UL, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc.BadCompanions.GetBadCompanions.total") + .value()); + EXPECT_EQ( + 1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.success").value()); + EXPECT_EQ(1UL, + decoder_callbacks_.clusterInfo()->statsScope().counterFromString("grpc.total").value()); +} + TEST_F(GrpcStatsFilterConfigTest, MessageCounts) { - initialize(true); + config_.mutable_stats_for_all_methods()->set_value(true); + config_.set_emit_filter_state(true); + initialize(); + Http::TestRequestHeaderMapImpl request_headers{ {"content-type", "application/grpc+proto"}, {":path", "/lyft.users.BadCompanions/GetBadCompanions"}}; @@ -136,13 +349,14 @@ TEST_F(GrpcStatsFilterConfigTest, MessageCounts) { EXPECT_EQ(2U, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") + .counterFromString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") + .value()); + EXPECT_EQ(0U, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") .value()); - EXPECT_EQ(0U, - decoder_callbacks_.clusterInfo() - ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") 
- .value()); const auto& data = stream_info_.filterState()->getDataReadOnly( HttpFilterNames::get().GrpcStats); EXPECT_EQ(2U, data.request_message_count); @@ -158,26 +372,28 @@ TEST_F(GrpcStatsFilterConfigTest, MessageCounts) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, false)); EXPECT_EQ(2U, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") + .counterFromString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") + .value()); + EXPECT_EQ(2U, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") .value()); - EXPECT_EQ(2U, - decoder_callbacks_.clusterInfo() - ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") - .value()); EXPECT_EQ(2U, data.request_message_count); EXPECT_EQ(2U, data.response_message_count); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(*b1, true)); EXPECT_EQ(2U, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") + .counterFromString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") + .value()); + EXPECT_EQ(3U, decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") .value()); - EXPECT_EQ(3U, - decoder_callbacks_.clusterInfo() - ->statsScope() - .counter("grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") - .value()); EXPECT_EQ(2U, data.request_message_count); EXPECT_EQ(3U, data.response_message_count); diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 7f132076fec2..8828f47c62c1 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ 
b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -219,13 +219,14 @@ TEST_P(GrpcWebFilterTest, StatsNormalResponse) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false)); Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}}; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers)); + EXPECT_EQ(1UL, + decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc-web.lyft.users.BadCompanions.GetBadCompanions.success") + .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc-web.lyft.users.BadCompanions.GetBadCompanions.success") - .value()); - EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() - ->statsScope() - .counter("grpc-web.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc-web.lyft.users.BadCompanions.GetBadCompanions.total") .value()); } @@ -240,13 +241,14 @@ TEST_P(GrpcWebFilterTest, StatsErrorResponse) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data, false)); Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "1"}}; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers)); + EXPECT_EQ(1UL, + decoder_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("grpc-web.lyft.users.BadCompanions.GetBadCompanions.failure") + .value()); EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() ->statsScope() - .counter("grpc-web.lyft.users.BadCompanions.GetBadCompanions.failure") - .value()); - EXPECT_EQ(1UL, decoder_callbacks_.clusterInfo() - ->statsScope() - .counter("grpc-web.lyft.users.BadCompanions.GetBadCompanions.total") + .counterFromString("grpc-web.lyft.users.BadCompanions.GetBadCompanions.total") .value()); } diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 28035fd6510e..24f715b569e7 100644 --- 
a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -41,7 +41,7 @@ class GzipFilterTest : public testing::Test { Json::ObjectSharedPtr config = Json::Factory::loadFromString(json); envoy::extensions::filters::http::gzip::v3::Gzip gzip; TestUtility::loadFromJson(json, gzip); - config_.reset(new GzipFilterConfig(gzip, "test.", stats_, runtime_)); + config_ = std::make_shared(gzip, "test.", stats_, runtime_); filter_ = std::make_unique(config_); filter_->setEncoderFilterCallbacks(encoder_callbacks_); filter_->setDecoderFilterCallbacks(decoder_callbacks_); @@ -93,9 +93,8 @@ class GzipFilterTest : public testing::Test { } void expectValidFinishedBuffer(const uint32_t content_length) { - uint64_t num_comp_slices = data_.getRawSlices(nullptr, 0); - absl::FixedArray compressed_slices(num_comp_slices); - data_.getRawSlices(compressed_slices.begin(), num_comp_slices); + Buffer::RawSliceVector compressed_slices = data_.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); const std::string header_hex_str = Hex::encode( reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); @@ -158,7 +157,7 @@ class GzipFilterTest : public testing::Test { Decompressor::ZlibDecompressorImpl decompressor_; Buffer::OwnedImpl decompressed_data_; std::string expected_str_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; NiceMock runtime_; NiceMock encoder_callbacks_; NiceMock decoder_callbacks_; @@ -176,7 +175,9 @@ TEST_F(GzipFilterTest, RuntimeDisabled) { } } )EOF"); - EXPECT_CALL(runtime_.snapshot_, getBoolean("foo_key", true)).WillOnce(Return(false)); + EXPECT_CALL(runtime_.snapshot_, getBoolean("foo_key", true)) + .Times(2) + .WillRepeatedly(Return(false)); doRequest({{":method", "get"}, {"accept-encoding", "deflate, gzip"}}, false); doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); } diff --git 
a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index 3ac11b3e9826..2dc2c3ec845e 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h" #include "common/common/base64.h" @@ -41,8 +43,8 @@ class HeaderToMetadataTest : public testing::Test { void initializeFilter(const std::string& yaml) { envoy::extensions::filters::http::header_to_metadata::v3::Config config; TestUtility::loadFromYaml(yaml, config); - config_.reset(new Config(config)); - filter_.reset(new HeaderToMetadataFilter(config_)); + config_ = std::make_shared(config); + filter_ = std::make_shared(config_); filter_->setDecoderFilterCallbacks(decoder_callbacks_); filter_->setEncoderFilterCallbacks(encoder_callbacks_); } diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index c24e3a418c41..54f5306cdb47 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -37,7 +37,8 @@ class HealthCheckFilterTest : public testing::Test { if (caching) { cache_timer_ = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*cache_timer_, enableTimer(_, _)); - cache_manager_.reset(new HealthCheckCacheManager(dispatcher_, std::chrono::milliseconds(1))); + cache_manager_ = + std::make_shared(dispatcher_, std::chrono::milliseconds(1)); } prepareFilter(pass_through); diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc index dc1a01f11015..e99f427275a8 100644 --- 
a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc +++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc @@ -45,7 +45,7 @@ request_type: internal void initializeFilter(const std::string& yaml) { envoy::extensions::filters::http::ip_tagging::v3::IPTagging config; TestUtility::loadFromYaml(yaml, config); - config_.reset(new IpTaggingFilterConfig(config, "prefix.", stats_, runtime_)); + config_ = std::make_shared(config, "prefix.", stats_, runtime_); filter_ = std::make_unique(config_); filter_->setDecoderFilterCallbacks(filter_callbacks_); } diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index b920bb2ef77f..9881cd25bab2 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -10,7 +10,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication; using ::google::jwt_verify::Status; using testing::_; diff --git a/test/extensions/filters/http/jwt_authn/mock.h b/test/extensions/filters/http/jwt_authn/mock.h index d0f8321a9d4d..38ec192d19f6 100644 --- a/test/extensions/filters/http/jwt_authn/mock.h +++ b/test/extensions/filters/http/jwt_authn/mock.h @@ -71,7 +71,7 @@ class MockUpstream { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response_message->body() = std::make_unique(response_body_); - cb.onSuccess(std::move(response_message)); + cb.onSuccess(request_, std::move(response_message)); called_count_++; return &request_; })); diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index a0fa4184d915..3d2e35eb3d0e 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -71,7 +71,7 @@ class LuaHttpFilterTest : public 
testing::Test { ~LuaHttpFilterTest() override { filter_->onDestroy(); } void setup(const std::string& lua_code) { - config_.reset(new FilterConfig(lua_code, tls_, cluster_manager_)); + config_ = std::make_shared(lua_code, tls_, cluster_manager_); setupFilter(); } @@ -797,7 +797,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } // Basic HTTP request flow. Asynchronous flag set to false. @@ -860,7 +860,7 @@ TEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } // Basic asynchronous, fire-and-forget HTTP request flow. 
@@ -990,14 +990,14 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { callbacks = &cb; return &request; })); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); response_message = std::make_unique( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "403"}}}); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 403"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("no body"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); @@ -1061,7 +1061,7 @@ TEST_F(LuaHttpFilterTest, HttpCallNoBody) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("no body"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } // HTTP call followed by immediate response. @@ -1114,7 +1114,7 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { {"set-cookie", "flavor=chocolate; Path=/"}, {"set-cookie", "variant=chewy; Path=/"}}; EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } // HTTP call with script error after resume. 
@@ -1162,7 +1162,7 @@ TEST_F(LuaHttpFilterTest, HttpCallErrorAfterResumeSuccess) { scriptLog(spdlog::level::err, StrEq("[string \"...\"]:14: attempt to index local 'foo' (a nil value)"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } // HTTP call failure. @@ -1207,7 +1207,7 @@ TEST_F(LuaHttpFilterTest, HttpCallFailure) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 503"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("upstream failure"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); - callbacks->onFailure(Http::AsyncClient::FailureReason::Reset); + callbacks->onFailure(request, Http::AsyncClient::FailureReason::Reset); } // HTTP call reset. @@ -1283,7 +1283,9 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateFailure) { .WillOnce( Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - cb.onFailure(Http::AsyncClient::FailureReason::Reset); + cb.onFailure(request, Http::AsyncClient::FailureReason::Reset); + // Intentionally return nullptr (instead of request handle) to trigger a particular + // code path. 
return nullptr; })); diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 2104c3317ba3..625801433f74 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -54,8 +54,8 @@ class HttpRateLimitFilterTest : public testing::Test { envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config{}; TestUtility::loadFromYaml(yaml, proto_config); - config_.reset( - new FilterConfig(proto_config, local_info_, stats_store_, runtime_, http_context_)); + config_ = std::make_shared(proto_config, local_info_, stats_store_, runtime_, + http_context_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); @@ -80,6 +80,13 @@ class HttpRateLimitFilterTest : public testing::Test { Filters::Common::RateLimit::MockClient* client_; NiceMock filter_callbacks_; + Stats::StatNamePool pool_{filter_callbacks_.clusterInfo()->statsScope().symbolTable()}; + Stats::StatName ratelimit_ok_{pool_.add("ratelimit.ok")}; + Stats::StatName ratelimit_error_{pool_.add("ratelimit.error")}; + Stats::StatName ratelimit_failure_mode_allowed_{pool_.add("ratelimit.failure_mode_allowed")}; + Stats::StatName ratelimit_over_limit_{pool_.add("ratelimit.over_limit")}; + Stats::StatName upstream_rq_4xx_{pool_.add("upstream_rq_4xx")}; + Stats::StatName upstream_rq_429_{pool_.add("upstream_rq_429")}; Filters::Common::RateLimit::RequestCallbacks* request_callbacks_{}; Http::TestRequestHeaderMapImpl request_headers_; Http::TestRequestTrailerMapImpl request_trailers_; @@ -218,7 +225,8 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { .Times(0); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.ok").value()); + EXPECT_EQ( + 1U, 
filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { @@ -275,7 +283,8 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { EXPECT_EQ(true, (expected_headers == response_headers)); EXPECT_THAT(*request_headers_to_add, IsSubsetOfHeaders(request_headers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { @@ -304,7 +313,8 @@ TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { @@ -333,10 +343,12 @@ TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_error_).value()); EXPECT_EQ(1U, filter_callbacks_.clusterInfo() ->statsScope() - .counter("ratelimit.failure_mode_allowed") + .counterFromStatName(ratelimit_failure_mode_allowed_) .value()); } @@ -364,10 +376,12 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponse) { setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) .Times(0); - EXPECT_EQ(1U, 
filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_error_).value()); EXPECT_EQ(1U, filter_callbacks_.clusterInfo() ->statsScope() - .counter("ratelimit.failure_mode_allowed") + .counterFromStatName(ratelimit_failure_mode_allowed_) .value()); } @@ -392,10 +406,12 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError)) .Times(0); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.error").value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_error_).value()); EXPECT_EQ(0U, filter_callbacks_.clusterInfo() ->statsScope() - .counter("ratelimit.failure_mode_allowed") + .counterFromStatName(ratelimit_failure_mode_allowed_) .value()); EXPECT_EQ("rate_limiter_error", filter_callbacks_.details_); } @@ -427,10 +443,16 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), nullptr); - EXPECT_EQ(1U, - filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.over_limit").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_429").value()); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); EXPECT_EQ("request_rate_limited", filter_callbacks_.details_); } @@ -474,10 +496,16 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { std::move(uh)); 
EXPECT_THAT(*request_headers_to_add, Not(IsSubsetOfHeaders(request_headers_))); - EXPECT_EQ(1U, - filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.over_limit").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_429").value()); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); } TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { @@ -510,10 +538,16 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - EXPECT_EQ(1U, - filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.over_limit").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_4xx").value()); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("upstream_rq_429").value()); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); } TEST_F(HttpRateLimitFilterTest, ResetDuringCall) { @@ -658,7 +692,8 @@ TEST_F(HttpRateLimitFilterTest, InternalRequestType) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_->encodeTrailers(response_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { @@ -702,7 +737,8 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { @@ -743,7 +779,8 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - EXPECT_EQ(1U, filter_callbacks_.clusterInfo()->statsScope().counter("ratelimit.ok").value()); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } TEST_F(HttpRateLimitFilterTest, ConfigValueTest) { diff --git a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index e5047c569aaf..d445860f8394 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -60,7 +60,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { void setDestinationPort(uint16_t port) { address_ = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", port, false); - ON_CALL(connection_, localAddress()).WillByDefault(ReturnRef(address_)); + ON_CALL(req_info_, downstreamLocalAddress()).WillByDefault(ReturnRef(address_)); } void 
setRequestedServerName(std::string server_name) { diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 95724d818be1..9a7770c353c3 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -97,7 +97,7 @@ TEST_P(AutoSniIntegrationTest, PassingNotDNS) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ(NULL, SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); } TEST_P(AutoSniIntegrationTest, PassingHostWithoutPort) { diff --git a/test/extensions/filters/http/squash/squash_filter_integration_test.cc b/test/extensions/filters/http/squash/squash_filter_integration_test.cc index a1ead08895b9..79cf9a91c23c 100644 --- a/test/extensions/filters/http/squash/squash_filter_integration_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_integration_test.cc @@ -84,7 +84,7 @@ class SquashFilterIntegrationTest : public testing::TestWithParamadd_clusters(); diff --git a/test/extensions/filters/http/squash/squash_filter_test.cc b/test/extensions/filters/http/squash/squash_filter_test.cc index c4769c97d858..d2cb53bf52b6 100644 --- a/test/extensions/filters/http/squash/squash_filter_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_test.cc @@ -223,7 +223,7 @@ class SquashFilterTest : public testing::Test { Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", status}}})); msg->body() = std::make_unique(body); - popPendingCallback()->onSuccess(std::move(msg)); + popPendingCallback()->onSuccess(request_, std::move(msg)); } void completeCreateRequest() { @@ -265,7 +265,9 
@@ TEST_F(SquashFilterTest, DecodeHeaderContinuesOnClientFail) { .WillOnce(Invoke( [&](Envoy::Http::RequestMessagePtr&, Envoy::Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Envoy::Http::AsyncClient::Request* { - callbacks.onFailure(Envoy::Http::AsyncClient::FailureReason::Reset); + callbacks.onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset); + // Intentionally return nullptr (instead of request handle) to trigger a particular + // code path. return nullptr; })); @@ -286,7 +288,7 @@ TEST_F(SquashFilterTest, DecodeContinuesOnCreateAttachmentFail) { EXPECT_CALL(filter_callbacks_, continueDecoding()); EXPECT_CALL(*attachmentTimeout_timer_, disableTimer()); - popPendingCallback()->onFailure(Envoy::Http::AsyncClient::FailureReason::Reset); + popPendingCallback()->onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset); Envoy::Buffer::OwnedImpl data("nothing here"); EXPECT_EQ(Envoy::Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); @@ -365,7 +367,7 @@ TEST_F(SquashFilterTest, CheckRetryPollingAttachmentOnFailure) { auto retry_timer = new NiceMock(&filter_callbacks_.dispatcher_); EXPECT_CALL(*retry_timer, enableTimer(config_->attachmentPollPeriod(), _)); - popPendingCallback()->onFailure(Envoy::Http::AsyncClient::FailureReason::Reset); + popPendingCallback()->onFailure(request_, Envoy::Http::AsyncClient::FailureReason::Reset); // Expect the second get attachment request expectAsyncClientSend(); diff --git a/test/extensions/filters/http/wasm/config_test.cc b/test/extensions/filters/http/wasm/config_test.cc index 423429a5cf1f..464ea4c9e9c0 100644 --- a/test/extensions/filters/http/wasm/config_test.cc +++ b/test/extensions/filters/http/wasm/config_test.cc @@ -8,6 +8,7 @@ #include "extensions/common/wasm/wasm.h" #include "extensions/filters/http/wasm/config.h" +#include "test/mocks/http/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" @@ -179,6 +180,8 
@@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWASM) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -190,8 +193,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteWASM) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); @@ -227,6 +230,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteConnectionReset) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -234,8 +239,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteConnectionReset) { .WillOnce( Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onFailure(Envoy::Http::AsyncClient::FailureReason::Reset); - return nullptr; + callbacks.onFailure(request, Envoy::Http::AsyncClient::FailureReason::Reset); + return &request; })); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); @@ -268,6 +273,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessWith503) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + 
NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -276,9 +283,10 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessWith503) { Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callbacks.onSuccess( + request, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); - return nullptr; + return &request; })); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); @@ -310,6 +318,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessIncorrectSha256) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -321,8 +331,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessIncorrectSha256) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context_); @@ -356,6 +366,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); int num_retries = 3; EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) 
.WillRepeatedly(ReturnRef(cluster_manager_.async_client_)); @@ -368,8 +380,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "503"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); EXPECT_CALL(*retry_timer_, enableTimer(_, _)) @@ -383,9 +395,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); } @@ -423,6 +434,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessBadcode) { envoy::extensions::filters::http::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -434,7 +447,7 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromRemoteSuccessBadcode) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); + callbacks.onSuccess(request, std::move(response)); return nullptr; })); diff --git a/test/extensions/filters/http/wasm/wasm_filter_test.cc b/test/extensions/filters/http/wasm/wasm_filter_test.cc index f5a6909d74d6..b5b8bdef4aba 100644 --- a/test/extensions/filters/http/wasm/wasm_filter_test.cc +++ b/test/extensions/filters/http/wasm/wasm_filter_test.cc @@ -495,7 +495,7 @@ TEST_P(WasmHttpFilterTest, AsyncCall) { 
EXPECT_NE(callbacks, nullptr); if (callbacks) { - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } } @@ -540,7 +540,7 @@ TEST_P(WasmHttpFilterTest, AsyncCallAfterDestroyed) { // (Don't) Make the callback on the destroyed VM. EXPECT_EQ(callbacks, nullptr); if (callbacks) { - callbacks->onSuccess(std::move(response_message)); + callbacks->onSuccess(request, std::move(response_message)); } } diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc index 12bca4c9eac6..c2037ed8ca67 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc @@ -31,12 +31,13 @@ TEST(HttpInspectorConfigFactoryTest, TestCreateFactory) { Server::Configuration::MockListenerFactoryContext context; EXPECT_CALL(context, scope()).Times(1); Network::ListenerFilterFactoryCb cb = - factory->createFilterFactoryFromProto(*proto_config, context); + factory->createListenerFilterFactoryFromProto(*proto_config, nullptr, context); Network::MockListenerFilterManager manager; Network::ListenerFilterPtr added_filter; - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&added_filter](Network::ListenerFilterPtr& filter) { + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&added_filter](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) { added_filter = std::move(filter); })); cb(manager); diff --git a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc index 9e2ebd4c1fcc..dca16ca9e455 100644 --- a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc +++ 
b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc @@ -29,11 +29,12 @@ TEST(OriginalSrcConfigFactoryTest, TestCreateFactory) { NiceMock context; Network::ListenerFilterFactoryCb cb = - factory.createFilterFactoryFromProto(*proto_config, context); + factory.createListenerFilterFactoryFromProto(*proto_config, nullptr, context); Network::MockListenerFilterManager manager; Network::ListenerFilterPtr added_filter; - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&added_filter](Network::ListenerFilterPtr& filter) { + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&added_filter](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) { added_filter = std::move(filter); })); cb(manager); diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 9dd9b3118d48..6efd2b9639a6 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -77,9 +77,7 @@ class ProxyProtocolTest : public testing::TestWithParam& accessLogs() const override { + return empty_access_logs_; + } // Network::FilterChainManager const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override { @@ -107,13 +108,13 @@ class ProxyProtocolTest : public testing::TestWithParam bool { filter_manager.addAcceptFilter( - std::make_unique(std::make_shared(listenerScope()))); + nullptr, std::make_unique(std::make_shared(listenerScope()))); maybeExitDispatcher(); return true; })); conn_->connect(); if (read) { - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); EXPECT_CALL(factory_, createNetworkFilterChain(_, _)) .WillOnce(Invoke([&](Network::Connection& connection, const std::vector&) -> bool { @@ -170,7 +171,7 @@ class ProxyProtocolTest : public testing::TestWithParam 
socket_; @@ -186,6 +187,7 @@ class ProxyProtocolTest : public testing::TestWithParam empty_access_logs_; }; // Parameterize the listener socket address version. @@ -1010,7 +1012,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParam bool { filter_manager.addAcceptFilter( - std::make_unique(std::make_shared(listenerScope()))); + nullptr, std::make_unique(std::make_shared(listenerScope()))); return true; })); } @@ -1022,9 +1024,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParam& accessLogs() const override { + return empty_access_logs_; + } // Network::FilterChainManager const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override { @@ -1042,7 +1045,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParamconnect(); - read_filter_.reset(new NiceMock()); + read_filter_ = std::make_shared>(); EXPECT_CALL(factory_, createNetworkFilterChain(_, _)) .WillOnce(Invoke([&](Network::Connection& connection, const std::vector&) -> bool { @@ -1099,6 +1102,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParam read_filter_; std::string name_; const Network::FilterChainSharedPtr filter_chain_; + const std::vector empty_access_logs_; }; // Parameterize the listener socket address version. 
diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index b4b522f7c274..634d780a0170 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -112,7 +112,7 @@ stat_prefix: vpn std::unique_ptr instance_; Event::MockTimer* interval_timer_; Http::AsyncClient::Callbacks* callbacks_; - Stats::IsolatedStoreImpl stats_store_; + Stats::TestUtil::TestStore stats_store_; NiceMock random_; Api::ApiPtr api_; std::shared_ptr ssl_; @@ -173,7 +173,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { message->body() = std::make_unique( api_->fileSystem().fileReadToEnd(TestEnvironment::runfilesPath( "test/extensions/filters/network/client_ssl_auth/test_data/vpn_response_1.json"))); - callbacks_->onSuccess(std::move(message)); + callbacks_->onSuccess(request_, std::move(message)); EXPECT_EQ(1U, stats_store_ .gauge("auth.clientssl.vpn.total_principals", Stats::Gauge::ImportMode::NeverImport) @@ -227,7 +227,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { EXPECT_CALL(*interval_timer_, enableTimer(_, _)); message = std::make_unique( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}}); - callbacks_->onSuccess(std::move(message)); + callbacks_->onSuccess(request_, std::move(message)); // Interval timer fires. setupRequest(); @@ -238,7 +238,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { message = std::make_unique( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}}); message->body() = std::make_unique("bad_json"); - callbacks_->onSuccess(std::move(message)); + callbacks_->onSuccess(request_, std::move(message)); // Interval timer fires. setupRequest(); @@ -246,7 +246,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { // No response failure. 
EXPECT_CALL(*interval_timer_, enableTimer(_, _)); - callbacks_->onFailure(Http::AsyncClient::FailureReason::Reset); + callbacks_->onFailure(request_, Http::AsyncClient::FailureReason::Reset); // Interval timer fires, cannot obtain async client. EXPECT_CALL(cm_, httpAsyncClientForCluster("vpn")).WillOnce(ReturnRef(cm_.async_client_)); @@ -255,8 +255,11 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callbacks.onSuccess( + request_, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); + // Intentionally return nullptr (instead of request handle) to trigger a particular + // code path. return nullptr; })); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc index 044e003a1221..c9028a1da42a 100644 --- a/test/extensions/filters/network/common/redis/client_impl_test.cc +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -25,7 +25,6 @@ using testing::Invoke; using testing::Property; using testing::Ref; using testing::Return; -using testing::ReturnRef; using testing::SaveArg; namespace Envoy { diff --git a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc index 919f1490e845..6b6272165ae3 100644 --- a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc +++ b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc @@ -10,7 +10,7 @@ class DirectResponseIntegrationTest : public testing::TestWithParam factory_context_; - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; DubboFilterStats 
stats_; ConfigDubboProxy proto_config_; diff --git a/test/extensions/filters/network/dubbo_proxy/mocks.cc b/test/extensions/filters/network/dubbo_proxy/mocks.cc index 13565441e643..4cd21e654dc9 100644 --- a/test/extensions/filters/network/dubbo_proxy/mocks.cc +++ b/test/extensions/filters/network/dubbo_proxy/mocks.cc @@ -62,7 +62,7 @@ MockDecoderFilter::MockDecoderFilter() { MockDecoderFilter::~MockDecoderFilter() = default; MockDecoderFilterCallbacks::MockDecoderFilterCallbacks() { - route_.reset(new NiceMock()); + route_ = std::make_shared>(); ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_)); ON_CALL(*this, connection()).WillByDefault(Return(&connection_)); @@ -80,7 +80,7 @@ MockEncoderFilter::MockEncoderFilter() { MockEncoderFilter::~MockEncoderFilter() = default; MockEncoderFilterCallbacks::MockEncoderFilterCallbacks() { - route_.reset(new NiceMock()); + route_ = std::make_shared>(); ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_)); ON_CALL(*this, connection()).WillByDefault(Return(&connection_)); diff --git a/test/extensions/filters/network/dubbo_proxy/router_test.cc b/test/extensions/filters/network/dubbo_proxy/router_test.cc index 13796ab9fead..e21148d3d743 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_test.cc @@ -1,3 +1,5 @@ +#include + #include "extensions/filters/network/dubbo_proxy/app_exception.h" #include "extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.h" #include "extensions/filters/network/dubbo_proxy/message_impl.h" @@ -93,7 +95,7 @@ class DubboRouterTestBase { void initializeMetadata(MessageType msg_type) { msg_type_ = msg_type; - metadata_.reset(new MessageMetadata()); + metadata_ = std::make_shared(); metadata_->setMessageType(msg_type_); metadata_->setRequestId(1); diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index 
bef1d2b10e5e..47a208dc8c3b 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -50,7 +50,7 @@ class ExtAuthzFilterTest : public testing::Test { envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{}; TestUtility::loadFromJson(json, proto_config); - config_.reset(new Config(proto_config, stats_store_)); + config_ = std::make_shared(proto_config, stats_store_); client_ = new Filters::Common::ExtAuthz::MockClient(); filter_ = std::make_unique(config_, Filters::Common::ExtAuthz::ClientPtr{client_}); filter_->initializeReadFilterCallbacks(filter_callbacks_); @@ -75,7 +75,7 @@ class ExtAuthzFilterTest : public testing::Test { } } - Stats::IsolatedStoreImpl stats_store_; + Stats::TestUtil::TestStore stats_store_; ConfigSharedPtr config_; Filters::Common::ExtAuthz::MockClient* client_; std::unique_ptr filter_; @@ -106,7 +106,7 @@ TEST_F(ExtAuthzFilterTest, OKWithOnData) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, testing::A())) + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -147,7 +147,7 @@ TEST_F(ExtAuthzFilterTest, DeniedWithOnData) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -186,7 +186,7 @@ TEST_F(ExtAuthzFilterTest, FailOpen) { EXPECT_CALL(filter_callbacks_.connection_, 
remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -218,7 +218,7 @@ TEST_F(ExtAuthzFilterTest, FailClose) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -247,7 +247,7 @@ TEST_F(ExtAuthzFilterTest, DoNotCallCancelonRemoteClose) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -280,7 +280,7 @@ TEST_F(ExtAuthzFilterTest, VerifyCancelOnRemoteClose) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -309,7 +309,7 @@ TEST_F(ExtAuthzFilterTest, ImmediateOK) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); 
EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); @@ -339,7 +339,7 @@ TEST_F(ExtAuthzFilterTest, ImmediateNOK) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Denied)); @@ -365,7 +365,7 @@ TEST_F(ExtAuthzFilterTest, ImmediateErrorFailOpen) { EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); - EXPECT_CALL(*client_, check(_, _, _)) + EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error)); diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index af387fccb8ef..74f20ff9c472 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -3,6 +3,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -11,11 +12,17 @@ 
load( envoy_package() +envoy_proto_library( + name = "config", + srcs = ["config.proto"], +) + envoy_extension_cc_test( name = "config_test", srcs = ["config_test.cc"], extension_name = "envoy.filters.network.http_connection_manager", deps = [ + ":config_cc_proto", "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_lib", "//source/extensions/access_loggers/file:config", diff --git a/test/extensions/filters/network/http_connection_manager/config.proto b/test/extensions/filters/network/http_connection_manager/config.proto new file mode 100644 index 000000000000..7b75b77076f4 --- /dev/null +++ b/test/extensions/filters/network/http_connection_manager/config.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test.http_connection_manager; + +message CustomRequestIDExtension { + string test_field = 1; +} + +message UnknownRequestIDExtension { +} \ No newline at end of file diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index f292bb502d88..54b438220e79 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1,13 +1,17 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" +#include "envoy/server/request_id_extension_config.h" #include "envoy/type/v3/percent.pb.h" #include "common/buffer/buffer_impl.h" #include "common/http/date_provider_impl.h" +#include "common/http/request_id_extension_uuid_impl.h" #include "extensions/filters/network/http_connection_manager/config.h" +#include "test/extensions/filters/network/http_connection_manager/config.pb.h" +#include "test/extensions/filters/network/http_connection_manager/config.pb.validate.h" 
#include "test/mocks/config/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" @@ -24,7 +28,6 @@ using testing::Eq; using testing::NotNull; using testing::Pointee; using testing::Return; -using testing::ReturnRef; using testing::WhenDynamicCastTo; namespace Envoy { @@ -947,6 +950,61 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesFalse) { EXPECT_FALSE(config.shouldMergeSlashes()); } +// Validated that by default we allow requests with header names containing underscores. +TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::ALLOW, + config.headersWithUnderscoresAction()); +} + +// Validated that when configured, we drop headers with underscores. +TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresDroppedByConfig) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + common_http_protocol_options: + headers_with_underscores_action: DROP_HEADER + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER, + config.headersWithUnderscoresAction()); +} + +// Validated that when configured, we reject requests with header names containing underscores. 
+TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresRequestRejectedByConfig) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + common_http_protocol_options: + headers_with_underscores_action: REJECT_REQUEST + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST, + config.headersWithUnderscoresAction()); +} + TEST_F(HttpConnectionManagerConfigTest, ConfiguredRequestTimeout) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http @@ -1372,6 +1430,111 @@ TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtens deprecated_name)); } +namespace { + +class TestRequestIDExtension : public Http::RequestIDExtension { +public: + TestRequestIDExtension(const test::http_connection_manager::CustomRequestIDExtension& config) + : config_(config) {} + + void set(Http::RequestHeaderMap&, bool) override {} + void setInResponse(Http::ResponseHeaderMap&, const Http::RequestHeaderMap&) override {} + bool modBy(const Http::RequestHeaderMap&, uint64_t&, uint64_t) override { return false; } + Http::TraceStatus getTraceStatus(const Http::RequestHeaderMap&) override { + return Http::TraceStatus::Sampled; + } + void setTraceStatus(Http::RequestHeaderMap&, Http::TraceStatus) override {} + + std::string testField() { return config_.test_field(); } + +private: + test::http_connection_manager::CustomRequestIDExtension config_; +}; + +class TestRequestIDExtensionFactory : public Server::Configuration::RequestIDExtensionFactory { +public: + std::string name() const override { + return "test.http_connection_manager.CustomRequestIDExtension"; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override 
{ + return std::make_unique(); + } + + Http::RequestIDExtensionSharedPtr + createExtensionInstance(const Protobuf::Message& config, + Server::Configuration::FactoryContext& context) override { + const auto& custom_config = MessageUtil::downcastAndValidate< + const test::http_connection_manager::CustomRequestIDExtension&>( + config, context.messageValidationVisitor()); + return std::make_shared(custom_config); + } +}; + +} // namespace + +TEST_F(HttpConnectionManagerConfigTest, CustomRequestIDExtension) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + request_id_extension: + typed_config: + "@type": type.googleapis.com/test.http_connection_manager.CustomRequestIDExtension + test_field: example + http_filters: + - name: envoy.filters.http.router + )EOF"; + Registry::RegisterFactory + registered; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + auto request_id_extension = + dynamic_cast(config.requestIDExtension().get()); + ASSERT_NE(nullptr, request_id_extension); + EXPECT_EQ("example", request_id_extension->testField()); +} + +TEST_F(HttpConnectionManagerConfigTest, UnknownRequestIDExtension) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + request_id_extension: + typed_config: + "@type": type.googleapis.com/test.http_connection_manager.UnknownRequestIDExtension + http_filters: + - name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_REGEX( + HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_), + EnvoyException, "Didn't find a registered implementation for type"); +} + +TEST_F(HttpConnectionManagerConfigTest, 
DefaultRequestIDExtension) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + request_id_extension: {} + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + auto request_id_extension = + dynamic_cast(config.requestIDExtension().get()); + ASSERT_NE(nullptr, request_id_extension); +} + class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( diff --git a/test/extensions/filters/network/kafka/BUILD b/test/extensions/filters/network/kafka/BUILD index da6612efa710..19ac80b6947f 100644 --- a/test/extensions/filters/network/kafka/BUILD +++ b/test/extensions/filters/network/kafka/BUILD @@ -269,7 +269,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.kafka_broker", deps = [ ":message_utilities", - "//source/common/stats:isolated_store_lib", "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", + "//test/common/stats:stat_test_utility_lib", ], ) diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD index a14100d79309..89664fb909b8 100644 --- a/test/extensions/filters/network/kafka/broker/BUILD +++ b/test/extensions/filters/network/kafka/broker/BUILD @@ -36,8 +36,8 @@ envoy_extension_cc_test( srcs = ["filter_protocol_test.cc"], extension_name = "envoy.filters.network.kafka_broker", deps = [ - "//source/common/stats:isolated_store_lib", "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", + "//test/common/stats:stat_test_utility_lib", "//test/extensions/filters/network/kafka:buffer_based_test_lib", "//test/extensions/filters/network/kafka:message_utilities", "//test/test_common:test_time_lib", diff --git 
a/test/extensions/filters/network/kafka/broker/filter_protocol_test.cc b/test/extensions/filters/network/kafka/broker/filter_protocol_test.cc index ff7461279f45..2c0606ae91e9 100644 --- a/test/extensions/filters/network/kafka/broker/filter_protocol_test.cc +++ b/test/extensions/filters/network/kafka/broker/filter_protocol_test.cc @@ -10,6 +10,7 @@ #include "extensions/filters/network/kafka/external/requests.h" #include "extensions/filters/network/kafka/external/responses.h" +#include "test/common/stats/stat_test_utility.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/message_utilities.h" #include "test/test_common/test_time.h" @@ -32,7 +33,7 @@ class KafkaBrokerFilterProtocolTest : public testing::Test, protected RequestB, protected ResponseB { protected: - Stats::IsolatedStoreImpl scope_; + Stats::TestUtil::TestStore scope_; Event::TestRealTimeSystem time_source_; KafkaBrokerFilter testee_{scope_, time_source_, "prefix"}; diff --git a/test/extensions/filters/network/kafka/buffer_based_test.h b/test/extensions/filters/network/kafka/buffer_based_test.h index 138d5f3d9335..612f484df5ba 100644 --- a/test/extensions/filters/network/kafka/buffer_based_test.h +++ b/test/extensions/filters/network/kafka/buffer_based_test.h @@ -20,9 +20,8 @@ namespace Kafka { class BufferBasedTest { protected: const char* getBytes() { - uint64_t num_slices = buffer_.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer_.getRawSlices(slices.begin(), num_slices); + Buffer::RawSliceVector slices = buffer_.getRawSlices(1); + ASSERT(slices.size() == 1); return reinterpret_cast((slices[0]).mem_); } diff --git a/test/extensions/filters/network/kafka/metrics_integration_test.cc b/test/extensions/filters/network/kafka/metrics_integration_test.cc index beac2e008d12..252644485c99 100644 --- a/test/extensions/filters/network/kafka/metrics_integration_test.cc +++ 
b/test/extensions/filters/network/kafka/metrics_integration_test.cc @@ -1,8 +1,7 @@ -#include "common/stats/isolated_store_impl.h" - #include "extensions/filters/network/kafka/external/request_metrics.h" #include "extensions/filters/network/kafka/external/response_metrics.h" +#include "test/common/stats/stat_test_utility.h" #include "test/extensions/filters/network/kafka/message_utilities.h" #include "gtest/gtest.h" @@ -15,7 +14,7 @@ namespace MetricsIntegrationTest { class MetricsIntegrationTest : public testing::Test { protected: - Stats::IsolatedStoreImpl scope_; + Stats::TestUtil::TestStore scope_; RichRequestMetricsImpl request_metrics_{scope_, "prefix"}; RichResponseMetricsImpl response_metrics_{scope_, "prefix"}; }; diff --git a/test/extensions/filters/network/kafka/serialization_utilities.cc b/test/extensions/filters/network/kafka/serialization_utilities.cc index 16eaac30c3dd..625d4f083501 100644 --- a/test/extensions/filters/network/kafka/serialization_utilities.cc +++ b/test/extensions/filters/network/kafka/serialization_utilities.cc @@ -13,9 +13,8 @@ void assertStringViewIncrement(const absl::string_view incremented, } const char* getRawData(const Buffer::OwnedImpl& buffer) { - uint64_t num_slices = buffer.getRawSlices(nullptr, 0); - absl::FixedArray slices(num_slices); - buffer.getRawSlices(slices.begin(), num_slices); + Buffer::RawSliceVector slices = buffer.getRawSlices(1); + ASSERT(slices.size() == 1); return reinterpret_cast((slices[0]).mem_); } diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc index 55c7308a53c5..bb1fb3be37a5 100644 --- a/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc @@ -8,7 +8,7 @@ class LocalRateLimitIntegrationTest : public Event::TestUsingSimulatedTime, public 
BaseIntegrationTest { public: LocalRateLimitIntegrationTest() - : BaseIntegrationTest(GetParam(), ConfigHelper::TCP_PROXY_CONFIG) {} + : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {} ~LocalRateLimitIntegrationTest() override { test_server_.reset(); diff --git a/test/extensions/filters/network/mongo_proxy/proxy_test.cc b/test/extensions/filters/network/mongo_proxy/proxy_test.cc index 52f048d59275..71df3fb3d7d7 100644 --- a/test/extensions/filters/network/mongo_proxy/proxy_test.cc +++ b/test/extensions/filters/network/mongo_proxy/proxy_test.cc @@ -75,7 +75,7 @@ class MongoProxyFilterTest : public testing::Test { .WillRepeatedly(ReturnRef(stream_info_)); EXPECT_CALL(log_manager_, createAccessLog(_)).WillOnce(Return(file_)); - access_log_.reset(new AccessLog("test", log_manager_, dispatcher_.timeSource())); + access_log_ = std::make_shared("test", log_manager_, dispatcher_.timeSource()); } void initializeFilter(bool emit_dynamic_metadata = false) { @@ -96,7 +96,7 @@ class MongoProxyFilterTest : public testing::Test { fault.mutable_percentage()->set_denominator(envoy::type::v3::FractionalPercent::HUNDRED); fault.mutable_fixed_delay()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(10)); - fault_config_.reset(new Filters::Common::Fault::FaultDelayConfig(fault)); + fault_config_ = std::make_shared(fault); EXPECT_CALL(runtime_.snapshot_, featureEnabled("mongo.fault.fixed_delay.percent", diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index 3fe736ecfe39..e77f6080cba8 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -41,7 +41,7 @@ class RateLimitFilterTest : public testing::Test { envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; TestUtility::loadFromYaml(yaml, proto_config); - config_.reset(new Config(proto_config, stats_store_, 
runtime_)); + config_ = std::make_shared(proto_config, stats_store_, runtime_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); filter_->initializeReadFilterCallbacks(filter_callbacks_); @@ -86,7 +86,7 @@ stat_prefix: name failure_mode_deny: true )EOF"; - Stats::IsolatedStoreImpl stats_store_; + Stats::TestUtil::TestStore stats_store_; NiceMock runtime_; ConfigSharedPtr config_; Filters::Common::RateLimit::MockClient* client_; diff --git a/test/extensions/filters/network/rbac/filter_test.cc b/test/extensions/filters/network/rbac/filter_test.cc index 748d3036a99a..2e8fd2642da3 100644 --- a/test/extensions/filters/network/rbac/filter_test.cc +++ b/test/extensions/filters/network/rbac/filter_test.cc @@ -63,7 +63,7 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { void setDestinationPort(uint16_t port) { address_ = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", port, false); - EXPECT_CALL(callbacks_.connection_, localAddress()).WillRepeatedly(ReturnRef(address_)); + EXPECT_CALL(stream_info_, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address_)); } void setRequestedServerName(std::string server_name) { diff --git a/test/extensions/filters/network/rbac/integration_test.cc b/test/extensions/filters/network/rbac/integration_test.cc index 063ec1b95b46..571ba94b678f 100644 --- a/test/extensions/filters/network/rbac/integration_test.cc +++ b/test/extensions/filters/network/rbac/integration_test.cc @@ -27,8 +27,8 @@ class RoleBasedAccessControlNetworkFilterIntegrationTest RoleBasedAccessControlNetworkFilterIntegrationTest() : BaseIntegrationTest(GetParam(), rbac_config) {} - static void SetUpTestSuite() { - rbac_config = ConfigHelper::BASE_CONFIG + R"EOF( + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) + rbac_config = absl::StrCat(ConfigHelper::baseConfig(), R"EOF( filter_chains: filters: - name: rbac @@ -44,7 +44,7 @@ 
class RoleBasedAccessControlNetworkFilterIntegrationTest - not_id: any: true - name: envoy.filters.network.echo -)EOF"; +)EOF"); } void initializeFilter(const std::string& config) { diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index 3ef0fc22f3ce..5bb208bfa901 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -43,7 +43,7 @@ class MockRoute : public Route { class MockMirrorPolicy : public MirrorPolicy { public: MockMirrorPolicy(ConnPool::InstanceSharedPtr); - ~MockMirrorPolicy() = default; + ~MockMirrorPolicy() override = default; MOCK_METHOD(ConnPool::InstanceSharedPtr, upstream, (), (const)); MOCK_METHOD(bool, shouldMirror, (const std::string&), (const)); diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index a17a1dd58def..72cebf97fcd2 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -5,6 +5,7 @@ #include "extensions/filters/network/redis_proxy/proxy_filter.h" +#include "test/common/stats/stat_test_utility.h" #include "test/extensions/filters/network/common/redis/mocks.h" #include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/api/mocks.h" @@ -42,7 +43,7 @@ parseProtoFromYaml(const std::string& yaml_string) { class RedisProxyFilterConfigTest : public testing::Test { public: - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; Network::MockDrainDecision drain_decision_; Runtime::MockLoader runtime_; NiceMock api_; @@ -106,7 +107,8 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder RedisProxyFilterTest(const std::string& yaml_string) { envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config = parseProtoFromYaml(yaml_string); 
- config_.reset(new ProxyFilterConfig(proto_config, store_, drain_decision_, runtime_, api_)); + config_ = + std::make_shared(proto_config, store_, drain_decision_, runtime_, api_); filter_ = std::make_unique(*this, Common::Redis::EncoderPtr{encoder_}, splitter_, config_); filter_->initializeReadFilterCallbacks(filter_callbacks_); @@ -138,7 +140,7 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder Common::Redis::MockDecoder* decoder_{new Common::Redis::MockDecoder()}; Common::Redis::DecoderCallbacks* decoder_callbacks_{}; CommandSplitter::MockInstance splitter_; - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; NiceMock drain_decision_; NiceMock runtime_; ProxyFilterConfigSharedPtr config_; diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index 3f74f71aaaf7..b1e33acfe820 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -7,8 +7,6 @@ #include "gtest/gtest.h" -using testing::Return; - namespace RedisCmdSplitter = Envoy::Extensions::NetworkFilters::RedisProxy::CommandSplitter; namespace Envoy { diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index a1abb6eb7746..5de6de271950 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -12,6 +12,7 @@ #include "extensions/filters/network/thrift_proxy/framed_transport_impl.h" #include "extensions/filters/network/thrift_proxy/header_transport_impl.h" +#include "test/common/stats/stat_test_utility.h" #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/extensions/filters/network/thrift_proxy/utility.h" #include 
"test/mocks/network/mocks.h" @@ -96,7 +97,7 @@ class ThriftConnectionManagerTest : public testing::Test { proto_config_.set_stat_prefix("test"); - decoder_filter_.reset(new NiceMock()); + decoder_filter_ = std::make_shared>(); config_ = std::make_unique(proto_config_, context_, decoder_filter_, stats_); if (custom_transport_) { @@ -294,7 +295,7 @@ class ThriftConnectionManagerTest : public testing::Test { NiceMock context_; std::shared_ptr decoder_filter_; - Stats::IsolatedStoreImpl store_; + Stats::TestUtil::TestStore store_; ThriftFilterStats stats_; envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy proto_config_; diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index 9ea412d0362a..df6fabc70d9a 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -55,9 +55,9 @@ class ThriftRateLimitFilterTest : public testing::Test { proto_config{}; TestUtility::loadFromYaml(yaml, proto_config); - config_.reset(new Config(proto_config, local_info_, stats_store_, runtime_, cm_)); + config_ = std::make_shared(proto_config, local_info_, stats_store_, runtime_, cm_); - request_metadata_.reset(new ThriftProxy::MessageMetadata()); + request_metadata_ = std::make_shared(); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); diff --git a/test/extensions/filters/network/thrift_proxy/integration_test.cc b/test/extensions/filters/network/thrift_proxy/integration_test.cc index ffc97ab84f44..19ba17e4eb7e 100644 --- a/test/extensions/filters/network/thrift_proxy/integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/integration_test.cc @@ -22,8 +22,8 @@ class ThriftConnManagerIntegrationTest : public 
testing::TestWithParam>, public BaseThriftIntegrationTest { public: - static void SetUpTestSuite() { - thrift_config_ = ConfigHelper::BASE_CONFIG + R"EOF( + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) + thrift_config_ = absl::StrCat(ConfigHelper::baseConfig(), R"EOF( filter_chains: filters: - name: thrift @@ -64,7 +64,7 @@ class ThriftConnManagerIntegrationTest method_name: "poke" route: cluster: "cluster_3" - )EOF"; + )EOF"); } void initializeCall(DriverMode mode) { diff --git a/test/extensions/filters/network/thrift_proxy/mocks.cc b/test/extensions/filters/network/thrift_proxy/mocks.cc index 8b68296e786d..ee57deef2893 100644 --- a/test/extensions/filters/network/thrift_proxy/mocks.cc +++ b/test/extensions/filters/network/thrift_proxy/mocks.cc @@ -1,5 +1,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" +#include + #include "common/protobuf/protobuf.h" #include "gtest/gtest.h" @@ -81,7 +83,7 @@ MockDecoderFilter::MockDecoderFilter() { MockDecoderFilter::~MockDecoderFilter() = default; MockDecoderFilterCallbacks::MockDecoderFilterCallbacks() { - route_.reset(new NiceMock()); + route_ = std::make_shared>(); ON_CALL(*this, streamId()).WillByDefault(Return(stream_id_)); ON_CALL(*this, connection()).WillByDefault(Return(&connection_)); @@ -92,7 +94,7 @@ MockDecoderFilterCallbacks::~MockDecoderFilterCallbacks() = default; MockFilterConfigFactory::MockFilterConfigFactory() : FactoryBase("envoy.filters.thrift.mock_filter") { - mock_filter_.reset(new NiceMock()); + mock_filter_ = std::make_shared>(); } MockFilterConfigFactory::~MockFilterConfigFactory() = default; diff --git a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc index 70a5c6174001..458412b7d0e0 100644 --- a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc @@ -38,7 +38,7 @@ 
class ThriftRateLimitConfigurationTest : public testing::Test { } MessageMetadata& genMetadata(const std::string& method_name) { - metadata_.reset(new MessageMetadata()); + metadata_ = std::make_shared(); metadata_->setMethodName(method_name); return *metadata_; } diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index 3b094f1ad344..ca3d8198a5bd 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -95,7 +95,7 @@ class ThriftRouterTestBase { void initializeMetadata(MessageType msg_type, std::string method = "method") { msg_type_ = msg_type; - metadata_.reset(new MessageMetadata()); + metadata_ = std::make_shared(); metadata_->setMethodName(method); metadata_->setMessageType(msg_type_); metadata_->setSequenceId(1); @@ -437,7 +437,7 @@ TEST_F(ThriftRouterTest, NoRoute) { EXPECT_TRUE(end_stream); })); EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); - EXPECT_EQ(1U, context_.scope().counter("test.route_missing").value()); + EXPECT_EQ(1U, context_.scope().counterFromString("test.route_missing").value()); } TEST_F(ThriftRouterTest, NoCluster) { @@ -456,7 +456,7 @@ TEST_F(ThriftRouterTest, NoCluster) { EXPECT_TRUE(end_stream); })); EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); - EXPECT_EQ(1U, context_.scope().counter("test.unknown_cluster").value()); + EXPECT_EQ(1U, context_.scope().counterFromString("test.unknown_cluster").value()); } TEST_F(ThriftRouterTest, ClusterMaintenanceMode) { @@ -477,7 +477,7 @@ TEST_F(ThriftRouterTest, ClusterMaintenanceMode) { EXPECT_TRUE(end_stream); })); EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); - EXPECT_EQ(1U, context_.scope().counter("test.upstream_rq_maintenance_mode").value()); + EXPECT_EQ(1U, context_.scope().counterFromString("test.upstream_rq_maintenance_mode").value()); } 
TEST_F(ThriftRouterTest, NoHealthyHosts) { @@ -499,7 +499,7 @@ TEST_F(ThriftRouterTest, NoHealthyHosts) { })); EXPECT_EQ(FilterStatus::StopIteration, router_->messageBegin(metadata_)); - EXPECT_EQ(1U, context_.scope().counter("test.no_healthy_upstream").value()); + EXPECT_EQ(1U, context_.scope().counterFromString("test.no_healthy_upstream").value()); } TEST_F(ThriftRouterTest, TruncatedResponse) { diff --git a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc index 83387f31f846..d60c6dacd415 100644 --- a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc @@ -23,8 +23,8 @@ class ThriftTranslationIntegrationTest std::tuple>, public BaseThriftIntegrationTest { public: - static void SetUpTestSuite() { - thrift_config_ = ConfigHelper::BASE_CONFIG + R"EOF( + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) + thrift_config_ = absl::StrCat(ConfigHelper::baseConfig(), R"EOF( filter_chains: filters: - name: thrift @@ -38,7 +38,7 @@ class ThriftTranslationIntegrationTest method_name: "add" route: cluster: "cluster_0" - )EOF"; + )EOF"); } void initialize() override { diff --git a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc index 5b1b77b9ae78..3d08c0eb95e2 100644 --- a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/common/exception.h" #include "common/buffer/buffer_impl.h" @@ -38,7 +40,7 @@ class TestTwitterProtocolImpl : public TwitterProtocolImpl { class TwitterProtocolTest : public testing::Test { public: - void clearMetadata() { metadata_.reset(new MessageMetadata()); } + void 
clearMetadata() { metadata_ = std::make_shared(); } void resetMetadata() { clearMetadata(); diff --git a/test/extensions/filters/network/wasm/config_test.cc b/test/extensions/filters/network/wasm/config_test.cc index f3180e5eb9ef..6b4bae395a04 100644 --- a/test/extensions/filters/network/wasm/config_test.cc +++ b/test/extensions/filters/network/wasm/config_test.cc @@ -149,6 +149,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteWASM) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -160,8 +162,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteWASM) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); @@ -196,6 +198,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteConnectionReset) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -203,8 +207,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteConnectionReset) { .WillOnce( Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - callbacks.onFailure(Envoy::Http::AsyncClient::FailureReason::Reset); - return nullptr; + 
callbacks.onFailure(request, Envoy::Http::AsyncClient::FailureReason::Reset); + return &request; })); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); @@ -237,6 +241,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteSuccessWith503) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -245,9 +251,10 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteSuccessWith503) { Invoke([&](Http::RequestMessagePtr&, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callbacks.onSuccess( + request, Http::ResponseMessagePtr{new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "503"}}})}); - return nullptr; + return &request; })); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); @@ -279,6 +286,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteSuccessIncorrectSha256) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -290,8 +299,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteSuccessIncorrectSha256) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); Network::FilterFactoryCb cb = 
factory.createFilterFactoryFromProto(proto_config, context_); @@ -325,6 +334,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); int num_retries = 3; EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillRepeatedly(ReturnRef(cluster_manager_.async_client_)); @@ -337,8 +348,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "503"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); EXPECT_CALL(*retry_timer_, enableTimer(_, _)) @@ -353,8 +364,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteMultipleRetries) { new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + callbacks.onSuccess(request, std::move(response)); + return &request; })); } @@ -391,6 +402,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteSuccessBadcode) { envoy::extensions::filters::network::wasm::v3::Wasm proto_config; TestUtility::loadFromYaml(yaml, proto_config); WasmFilterConfig factory; + NiceMock client; + NiceMock request(&client); EXPECT_CALL(cluster_manager_, httpAsyncClientForCluster("cluster_1")) .WillOnce(ReturnRef(cluster_manager_.async_client_)); @@ -402,8 +415,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromRemoteSuccessBadcode) { new Http::ResponseMessageImpl(Http::ResponseHeaderMapPtr{ new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); response->body() = std::make_unique(code); - callbacks.onSuccess(std::move(response)); - return nullptr; + 
callbacks.onSuccess(request, std::move(response)); + return &request; })); Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context_); diff --git a/test/extensions/filters/network/zookeeper_proxy/filter_test.cc b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc index ad2b0efc96b2..f818f403a0af 100644 --- a/test/extensions/filters/network/zookeeper_proxy/filter_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc @@ -500,7 +500,7 @@ class ZooKeeperFilterTest : public testing::Test { return scope_.findHistogram(storage.statName()); } - Stats::IsolatedStoreImpl scope_; + Stats::TestUtil::TestStore scope_; ZooKeeperFilterConfigSharedPtr config_; std::unique_ptr filter_; std::string stat_prefix_{"test.zookeeper"}; diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD new file mode 100644 index 000000000000..a7c21842cd4c --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -0,0 +1,25 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +envoy_package() + +envoy_extension_cc_test( + name = "dns_filter_test", + srcs = ["dns_filter_test.cc"], + extension_name = "envoy.filters.udp_listener.dns_filter", + deps = [ + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc new file mode 100644 index 000000000000..72f349ff196a --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -0,0 +1,90 @@ +#include 
"envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" +#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.validate.h" + +#include "common/common/logger.h" + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::AtLeast; +using testing::InSequence; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace { + +class DnsFilterTest : public testing::Test { +public: + DnsFilterTest() + : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")) { + + Logger::Registry::setLogLevel(spdlog::level::info); + + EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0)); + } + + ~DnsFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } + + void setup(const std::string& yaml) { + envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig config; + TestUtility::loadFromYamlAndValidate(yaml, config); + auto store = stats_store_.createScope("dns_scope"); + EXPECT_CALL(listener_factory_, scope()).WillOnce(ReturnRef(*store)); + + config_ = std::make_shared(listener_factory_, config); + filter_ = std::make_unique(callbacks_, config_); + } + + const Network::Address::InstanceConstSharedPtr listener_address_; + Server::Configuration::MockListenerFactoryContext listener_factory_; + DnsFilterEnvoyConfigSharedPtr config_; + + std::unique_ptr filter_; + Network::MockUdpReadFilterCallbacks callbacks_; + Stats::IsolatedStoreImpl stats_store_; + Runtime::RandomGeneratorImpl rng_; + + const std::string config_yaml = R"EOF( +stat_prefix: "my_prefix" +server_config: + inline_dns_table: + external_retry_count: 3 + virtual_domains: + - name: "www.foo1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 + - name: "www.foo2.com" + endpoint: + address_list: + address: + - 
2001:8a:c1::2800:7 + - name: "www.foo3.com" + endpoint: + address_list: + address: + - 10.0.3.1 + )EOF"; +}; + +TEST_F(DnsFilterTest, TestConfig) { + InSequence s; + + setup(config_yaml); +} + +} // namespace +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index 11dd261ec156..3e773189bcc9 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -66,6 +66,7 @@ class UdpProxyFilterTest : public testing::Test { int send_sys_errno = 0) { EXPECT_CALL(*idle_timer_, enableTimer(parent_.config_->sessionTimeout(), nullptr)); + EXPECT_CALL(*io_handle_, supportsMmsg()); // Return the datagram. EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _)) .WillOnce( @@ -77,11 +78,10 @@ class UdpProxyFilterTest : public testing::Test { } else { ASSERT(data.size() <= slices[0].len_); memcpy(slices[0].mem_, data.data(), data.size()); - output.peer_address_ = upstream_address_; + output.msg_[0].peer_address_ = upstream_address_; return makeNoError(data.size()); } })); - if (recv_sys_errno == 0) { // Send the datagram downstream. EXPECT_CALL(parent_.callbacks_.udp_listener_, send(_)) @@ -97,6 +97,7 @@ class UdpProxyFilterTest : public testing::Test { } })); // Return an EAGAIN result. 
+ EXPECT_CALL(*io_handle_, supportsMmsg()); EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _)) .WillOnce(Return(ByMove(Api::IoCallUint64Result( 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), @@ -124,7 +125,7 @@ class UdpProxyFilterTest : public testing::Test { .WillRepeatedly(Return(Upstream::Host::Health::Healthy)); } - ~UdpProxyFilterTest() { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } + ~UdpProxyFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } void setup(const std::string& yaml, bool has_cluster = true) { envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig config; diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc index 590a998a926c..88b51986362b 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "test/integration/integration.h" @@ -12,14 +14,14 @@ class UdpProxyIntegrationTest : public testing::TestWithParam( #ifndef __APPLE__ "127.0.0.3", #else "127.0.0.1", #endif - port)); + port); } else { // IPv6 doesn't allow any non-local source address for sendmsg. And the only // local address guaranteed in tests in loopback. Unfortunately, even if it's not // specified, kernel will pick this address as source address. So this test // only checks if IoSocketHandle::sendmsg() sets up CMSG_DATA correctly, // i.e. cmsg_len is big enough when that code path is executed. 
- listener_address.reset(new Network::Address::Ipv6Instance("::1", port)); + listener_address = std::make_shared("::1", port); } requestResponseWithListenerAddress(*listener_address); diff --git a/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc b/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc index 41db5c69403e..f567b3d3e258 100644 --- a/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc @@ -141,7 +141,7 @@ TEST_P(GrpcFileBasedMetadataClientIntegrationTest, ExtraConfigFileBasedMetadataG class MockAuthContext : public ::grpc::AuthContext { public: - ~MockAuthContext() override {} + ~MockAuthContext() override = default; MOCK_METHOD(bool, IsPeerAuthenticated, (), (const, override)); MOCK_METHOD(std::vector, GetPeerIdentity, (), (const, override)); MOCK_METHOD(std::string, GetPeerIdentityPropertyName, (), (const, override)); diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 3e6e96f069a5..9d413879998c 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -53,9 +53,9 @@ class RedisHealthCheckerTest const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); - health_checker_.reset(new RedisHealthChecker( + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this)); + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } void setupAlwaysLogHealthCheckFailures() { @@ -77,9 +77,9 @@ class RedisHealthCheckerTest const auto& redis_config = getRedisHealthCheckConfig( 
health_check_config, ProtobufMessage::getStrictValidationVisitor()); - health_checker_.reset(new RedisHealthChecker( + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this)); + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } void setupExistsHealthcheck() { @@ -101,9 +101,9 @@ class RedisHealthCheckerTest const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); - health_checker_.reset(new RedisHealthChecker( + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this)); + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } void setupExistsHealthcheckDeprecated() { @@ -124,9 +124,9 @@ class RedisHealthCheckerTest const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); - health_checker_.reset(new RedisHealthChecker( + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this)); + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } void setupDontReuseConnection() { @@ -148,9 +148,9 @@ class RedisHealthCheckerTest const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); - health_checker_.reset(new RedisHealthChecker( + health_checker_ = std::make_shared( *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this)); + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } Extensions::NetworkFilters::Common::Redis::Client::ClientPtr diff --git 
a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index dcca8f3f244e..86c7ea98284a 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -31,7 +31,6 @@ using testing::_; using testing::Invoke; using testing::Return; -using testing::ReturnRef; namespace Envoy { namespace Quic { diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index ee8547d60ceb..6b42b24f854e 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -18,7 +18,6 @@ namespace Quic { using testing::_; using testing::Invoke; -using testing::Return; class EnvoyQuicClientStreamTest : public testing::TestWithParam { public: @@ -111,6 +110,7 @@ INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientStreamTests, EnvoyQuicClientStreamTest, testing::ValuesIn({true, false})); TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { + EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); quic_stream_->encodeHeaders(request_headers_, false); quic_stream_->encodeData(request_body_, true); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 1ea819717ddd..748554d8182f 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -20,7 +20,6 @@ using testing::_; using testing::Invoke; -using testing::Return; namespace Envoy { namespace Quic { @@ -174,6 +173,7 @@ TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { } TEST_P(EnvoyQuicServerStreamTest, PostRequestAndResponse) { + 
EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); sendRequest(request_body_, true, request_body_.size() * 2); quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/true); } diff --git a/test/extensions/quic_listeners/quiche/integration/BUILD b/test/extensions/quic_listeners/quiche/integration/BUILD index 50e556dd8420..e277da580d53 100644 --- a/test/extensions/quic_listeners/quiche/integration/BUILD +++ b/test/extensions/quic_listeners/quiche/integration/BUILD @@ -22,9 +22,11 @@ envoy_cc_test( "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", + "//source/extensions/resource_monitors/injected_resource:config", "//test/extensions/quic_listeners/quiche:quic_test_utils_for_envoy_lib", "//test/integration:http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", ], diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index 0409e6825786..0b754471987c 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/overload/v3/overload.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" @@ -42,17 +43,22 @@ class CodecClientCallbacksForTest : public Http::CodecClientCallbacks { 
Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset}; }; -class QuicHttpIntegrationTest : public testing::TestWithParam, - public HttpIntegrationTest { +class QuicHttpIntegrationTest : public HttpIntegrationTest, + public testing::TestWithParam { public: QuicHttpIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam(), - ConfigHelper::QUIC_HTTP_PROXY_CONFIG), + ConfigHelper::quicHttpProxyConfig()), supported_versions_(quic::CurrentSupportedVersions()), crypto_config_(std::make_unique()), conn_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) {} - - Network::ClientConnectionPtr makeClientConnection(uint32_t port) override { + alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), + injected_resource_filename_(TestEnvironment::temporaryPath("injected_resource")), + file_updater_(injected_resource_filename_) {} + + Network::ClientConnectionPtr makeClientConnectionWithOptions( + uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) override { + // Setting socket options is not supported. 
+ ASSERT(!options); server_addr_ = Network::Utility::resolveUrl( fmt::format("udp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); Network::Address::InstanceConstSharedPtr local_addr = @@ -109,6 +115,36 @@ class QuicHttpIntegrationTest : public testing::TestWithParammutable_typed_config()->PackFrom(tls_context); bootstrap.mutable_static_resources()->mutable_listeners(0)->set_reuse_port(set_reuse_port_); + + const std::string overload_config = fmt::format(R"EOF( + refresh_interval: + seconds: 0 + nanos: 1000000 + resource_monitors: + - name: "envoy.resource_monitors.injected_resource" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig + filename: "{}" + actions: + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.injected_resource" + threshold: + value: 0.95 + - name: "envoy.overload_actions.disable_http_keepalive" + triggers: + - name: "envoy.resource_monitors.injected_resource" + threshold: + value: 0.8 + - name: "envoy.overload_actions.stop_accepting_connections" + triggers: + - name: "envoy.resource_monitors.injected_resource" + threshold: + value: 0.9 + )EOF", + injected_resource_filename_); + *bootstrap.mutable_overload_manager() = + TestUtility::parseYaml(overload_config); }); config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -117,10 +153,13 @@ class QuicHttpIntegrationTest : public testing::TestWithParamset_allow_unexpected_disconnects(true); + + // Put envoy in overloaded state and check that it doesn't accept the new client connection. 
+ updateResource(0.9); + test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_connections.active", + 1); + codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http")))); + EXPECT_TRUE(codec_client_->disconnected()); + + // Reduce load a little to allow the connection to be accepted connection. + updateResource(0.8); + test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_connections.active", + 0); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + // Send response headers, but hold response body for now. + upstream_request_->encodeHeaders(default_response_headers_, /*end_stream=*/false); + + updateResource(0.95); + test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_requests.active", 1); + // Existing request should be able to finish. + upstream_request_->encodeData(10, true); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + + // New request should be rejected. 
+ auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response2->waitForEndStream(); + EXPECT_EQ("503", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("envoy overloaded", response2->body()); + codec_client_->close(); + + EXPECT_TRUE(makeRawHttpConnection(makeClientConnection((lookupPort("http"))))->disconnected()); +} + +TEST_P(QuicHttpIntegrationTest, AdminDrainDrainsListeners) { + testAdminDrain(Http::CodecClient::Type::HTTP1); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index f2f435993545..d15743831a8a 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -730,7 +730,7 @@ TEST(EnvoyQuicMemSliceTest, ConstructMemSliceFromBuffer) { std::string str2(1024, 'a'); // str2 is copied. 
buffer.add(str2); - EXPECT_EQ(1u, buffer.getRawSlices(nullptr, 0)); + EXPECT_EQ(1u, buffer.getRawSlices().size()); buffer.addBufferFragment(fragment); quic::QuicMemSlice slice1{quic::QuicMemSliceImpl(buffer, str2.length())}; diff --git a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc index 512f54853d99..5eb1d9629d36 100644 --- a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc +++ b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc @@ -1,5 +1,6 @@ #include +#include #include #include "common/network/address_impl.h" @@ -56,7 +57,7 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_CALL(os_sys_calls_, sendmsg(fd, _, 0)).WillOnce(Return(Api::SysCallSizeResult{5u, 0})); wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr); - Network::IoHandle::RecvMsgOutput output(nullptr); + Network::IoHandle::RecvMsgOutput output(1, nullptr); EXPECT_CALL(os_sys_calls_, recvmsg(fd, _, 0)).WillOnce(Invoke([](os_fd_t, msghdr* msg, int) { sockaddr_storage ss; auto ipv6_addr = reinterpret_cast(&ss); @@ -66,17 +67,34 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { ipv6_addr->sin6_port = htons(54321); *reinterpret_cast(msg->msg_name) = *ipv6_addr; msg->msg_namelen = sizeof(sockaddr_in6); + msg->msg_controllen = 0; return Api::SysCallSizeResult{5u, 0}; })); wrapper_->recvmsg(&slice, 1, /*self_port=*/12345, output); + size_t num_packet_per_call = 1u; + Network::IoHandle::RecvMsgOutput output2(num_packet_per_call, nullptr); + RawSliceArrays slices(num_packet_per_call, + absl::FixedArray({Buffer::RawSlice{data, 5}})); + EXPECT_CALL(os_sys_calls_, recvmmsg(fd, _, num_packet_per_call, _, nullptr)) + .WillOnce(Invoke([](os_fd_t, struct mmsghdr*, unsigned int, int, struct timespec*) { + return Api::SysCallIntResult{1u, 0}; + })); + wrapper_->recvmmsg(slices, /*self_port=*/12345, output2); + EXPECT_TRUE(wrapper_->close().ok()); // 
Following calls shouldn't be delegated. wrapper_->readv(5, &slice, 1); wrapper_->writev(&slice, 1); wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr); - wrapper_->recvmsg(&slice, 1, /*self_port=*/12345, output); + EXPECT_DEBUG_DEATH(wrapper_->recvmsg(&slice, 1, /*self_port=*/12345, output), + "recvmmsg is called after close"); + EXPECT_DEBUG_DEATH(wrapper_->recvmmsg(slices, /*self_port=*/12345, output2), + "recvmmsg is called after close"); + + EXPECT_CALL(os_sys_calls_, supportsMmsg()); + wrapper_->supportsMmsg(); } } // namespace Quic diff --git a/test/extensions/stats_sinks/hystrix/BUILD b/test/extensions/stats_sinks/hystrix/BUILD index b7f169347e46..148a62fad7c5 100644 --- a/test/extensions/stats_sinks/hystrix/BUILD +++ b/test/extensions/stats_sinks/hystrix/BUILD @@ -39,3 +39,13 @@ envoy_extension_cc_test( "//test/mocks/upstream:upstream_mocks", ], ) + +envoy_extension_cc_test( + name = "hystrix_integration_test", + srcs = ["hystrix_integration_test.cc"], + extension_name = "envoy.stat_sinks.hystrix", + deps = [ + "//source/extensions/stat_sinks/hystrix:config", + "//test/integration:http_protocol_integration_lib", + ], +) diff --git a/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc new file mode 100644 index 000000000000..9a5667e6d581 --- /dev/null +++ b/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc @@ -0,0 +1,61 @@ +#include "test/integration/http_protocol_integration.h" + +using testing::HasSubstr; +using testing::Not; +using testing::StartsWith; + +namespace Envoy { + +class HystrixIntegrationTest : public HttpProtocolIntegrationTest {}; + +INSTANTIATE_TEST_SUITE_P(Protocols, HystrixIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2}, + {FakeHttpConnection::Type::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + 
+TEST_P(HystrixIntegrationTest, NoChunkEncoding) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* metrics_sink = bootstrap.add_stats_sinks(); + metrics_sink->set_name("envoy.stat_sinks.hystrix"); + bootstrap.mutable_stats_flush_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + }); + initialize(); + + if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + // For HTTP/1.1 we use a raw client to make absolutely sure there is no chunk encoding. + Buffer::OwnedImpl buffer("GET /hystrix_event_stream HTTP/1.1\r\nHost: admin\r\n\r\n"); + std::string response; + RawConnectionDriver connection( + lookupPort("admin"), buffer, + [&](Network::ClientConnection& client, const Buffer::Instance& data) -> void { + response.append(data.toString()); + // Wait until there is a flush. + if (response.find("rollingCountCollapsedRequests") != std::string::npos) { + client.close(Network::ConnectionCloseType::NoFlush); + } + }, + version_); + connection.run(); + EXPECT_THAT(response, StartsWith("HTTP/1.1 200 OK\r\n")); + // Make sure that the response is not actually chunk encoded, but it does have the hystrix flush + // trailer. 
+ EXPECT_THAT(response, Not(HasSubstr("chunked"))); + EXPECT_THAT(response, Not(HasSubstr("3\r\n:\n\n"))); + EXPECT_THAT(response, HasSubstr(":\n\n")); + connection.close(); + } else { + codec_client_ = makeHttpConnection(lookupPort("admin")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/hystrix_event_stream"}, + {":scheme", "http"}, + {":authority", "admin"}}); + response->waitForBodyData(1); + EXPECT_THAT(response->body(), HasSubstr("rollingCountCollapsedRequests")); + codec_client_->close(); + } +} + +} // namespace Envoy diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index 212b2566c6d4..f8527b34e099 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -39,7 +39,7 @@ class ClusterTestInfo { // Set gauge value. membership_total_gauge_.name_ = "membership_total"; - ON_CALL(cluster_stats_scope_, gauge("membership_total", Stats::Gauge::ImportMode::Accumulate)) + ON_CALL(cluster_stats_scope_, gauge("membership_total", Stats::Gauge::ImportMode::NeverImport)) .WillByDefault(ReturnRef(membership_total_gauge_)); ON_CALL(membership_total_gauge_, value()).WillByDefault(Return(5)); @@ -513,10 +513,14 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { auto addr_instance_ = Envoy::Network::Utility::parseInternetAddress("2.3.4.5", 123, false); + Http::MockHttp1StreamEncoderOptions stream_encoder_options; ON_CALL(admin_stream_mock, getDecoderFilterCallbacks()).WillByDefault(ReturnRef(callbacks_)); + ON_CALL(admin_stream_mock, http1StreamEncoderOptions()) + .WillByDefault(Return(Http::Http1StreamEncoderOptionsOptRef(stream_encoder_options))); ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_mock)); ON_CALL(connection_mock, remoteAddress()).WillByDefault(ReturnRef(addr_instance_)); + EXPECT_CALL(stream_encoder_options, 
disableChunkEncoding()); ASSERT_EQ( sink_->handlerHystrixEventStream(path_and_query, response_headers, buffer, admin_stream_mock), Http::Code::OK); diff --git a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc index 8076949ad6c7..d159f0de0a58 100644 --- a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc @@ -29,7 +29,7 @@ class TestDriver : public OpenTracingDriver { recorder_ = recorder; options.recorder.reset(recorder); options.propagation_options = propagation_options; - tracer_.reset(new opentracing::mocktracer::MockTracer{std::move(options)}); + tracer_ = std::make_shared(std::move(options)); } const opentracing::mocktracer::InMemoryRecorder& recorder() const { return *recorder_; } @@ -61,7 +61,7 @@ class OpenTracingDriverTest : public testing::Test { SystemTime start_time_; std::unique_ptr driver_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; NiceMock config_; }; diff --git a/test/extensions/tracers/datadog/BUILD b/test/extensions/tracers/datadog/BUILD index 8b0d233641fa..6ba8482ffdb5 100644 --- a/test/extensions/tracers/datadog/BUILD +++ b/test/extensions/tracers/datadog/BUILD @@ -23,7 +23,6 @@ envoy_extension_cc_test( "//source/common/http:headers_lib", "//source/common/http:message_lib", "//source/common/runtime:runtime_lib", - "//source/common/runtime:uuid_util_lib", "//source/extensions/tracers/datadog:datadog_tracer_lib", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", diff --git a/test/extensions/tracers/datadog/config_test.cc b/test/extensions/tracers/datadog/config_test.cc index 5e958c43005c..2ba4a70398e2 100644 --- a/test/extensions/tracers/datadog/config_test.cc +++ b/test/extensions/tracers/datadog/config_test.cc @@ -41,7 +41,7 @@ TEST(DatadogTracerConfigTest, DatadogHttpTracer) { DatadogTracerFactory factory; auto message 
= Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr datadog_tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr datadog_tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, datadog_tracer); } diff --git a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc index 659d9f424309..a70ddb7e0b27 100644 --- a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc +++ b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc @@ -10,7 +10,6 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/datadog/datadog_tracer_impl.h" @@ -29,11 +28,15 @@ #include "gtest/gtest.h" using testing::_; +using testing::AnyNumber; +using testing::DoAll; using testing::Eq; using testing::Invoke; using testing::NiceMock; using testing::Return; using testing::ReturnRef; +using testing::StrictMock; +using testing::WithArg; namespace Envoy { namespace Extensions { @@ -44,6 +47,7 @@ namespace { class DatadogDriverTest : public testing::Test { public: void setup(envoy::config::trace::v3::DatadogConfig& datadog_config, bool init_timer) { + cm_.thread_local_cluster_.cluster_.info_->name_ = "fake_cluster"; ON_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) .WillByDefault(ReturnRef(cm_.async_client_)); @@ -78,7 +82,7 @@ class DatadogDriverTest : public testing::Test { NiceMock tls_; std::unique_ptr driver_; NiceMock* timer_; - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; NiceMock cm_; NiceMock random_; NiceMock runtime_; @@ -122,6 +126,21 @@ TEST_F(DatadogDriverTest, InitializeDriver) { } } +TEST_F(DatadogDriverTest, AllowCollectorClusterToBeAddedViaApi) { + EXPECT_CALL(cm_, 
get(Eq("fake_cluster"))).WillRepeatedly(Return(&cm_.thread_local_cluster_)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()) + .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true)); + + const std::string yaml_string = R"EOF( + collector_cluster: fake_cluster + )EOF"; + envoy::config::trace::v3::DatadogConfig datadog_config; + TestUtility::loadFromYaml(yaml_string, datadog_config); + + setup(datadog_config, true); +} + TEST_F(DatadogDriverTest, FlushSpansTimer) { setupValidDriver(); @@ -158,13 +177,200 @@ TEST_F(DatadogDriverTest, FlushSpansTimer) { Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); msg->body() = std::make_unique(""); - callback->onSuccess(std::move(msg)); + callback->onSuccess(request, std::move(msg)); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_skipped_no_cluster").value()); EXPECT_EQ(1U, stats_.counter("tracing.datadog.reports_sent").value()); EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_dropped").value()); EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_failed").value()); } +TEST_F(DatadogDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) { + Upstream::ClusterUpdateCallbacks* cluster_update_callbacks; + EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_)) + .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks), Return(nullptr))); + + setupValidDriver(); + + EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _)).Times(AnyNumber()); + + // Verify the effect of onClusterAddOrUpdate()/onClusterRemoval() on reporting logic, + // keeping in mind that they will be called both for relevant and irrelevant clusters. + + { + // Simulate removal of the relevant cluster. + cluster_update_callbacks->onClusterRemoval("fake_cluster"); + + // Verify that no report will be sent. 
+ EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0); + EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + + // Verify observability. + EXPECT_EQ(1U, stats_.counter("tracing.datadog.timer_flushed").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.traces_sent").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_failed").value()); + } + + { + // Simulate addition of an irrelevant cluster. + NiceMock unrelated_cluster; + unrelated_cluster.cluster_.info_->name_ = "unrelated_cluster"; + cluster_update_callbacks->onClusterAddOrUpdate(unrelated_cluster); + + // Verify that no report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0); + EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + + // Verify observability. + EXPECT_EQ(2U, stats_.counter("tracing.datadog.timer_flushed").value()); + EXPECT_EQ(2U, stats_.counter("tracing.datadog.traces_sent").value()); + EXPECT_EQ(2U, stats_.counter("tracing.datadog.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_failed").value()); + } + + { + // Simulate addition of the relevant cluster. 
+ cluster_update_callbacks->onClusterAddOrUpdate(cm_.thread_local_cluster_); + + // Verify that report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillOnce(ReturnRef(cm_.async_client_)); + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + EXPECT_CALL(cm_.async_client_, send_(_, _, _)) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request))); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + + // Complete in-flight request. + callback->onFailure(request, Http::AsyncClient::FailureReason::Reset); + + // Verify observability. + EXPECT_EQ(3U, stats_.counter("tracing.datadog.timer_flushed").value()); + EXPECT_EQ(3U, stats_.counter("tracing.datadog.traces_sent").value()); + EXPECT_EQ(2U, stats_.counter("tracing.datadog.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_dropped").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.reports_failed").value()); + } + + { + // Simulate removal of an irrelevant cluster. + cluster_update_callbacks->onClusterRemoval("unrelated_cluster"); + + // Verify that report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillOnce(ReturnRef(cm_.async_client_)); + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + EXPECT_CALL(cm_.async_client_, send_(_, _, _)) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request))); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + + // Complete in-flight request. 
+ Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "404"}}})); + callback->onSuccess(request, std::move(msg)); + + // Verify observability. + EXPECT_EQ(4U, stats_.counter("tracing.datadog.timer_flushed").value()); + EXPECT_EQ(4U, stats_.counter("tracing.datadog.traces_sent").value()); + EXPECT_EQ(2U, stats_.counter("tracing.datadog.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.datadog.reports_sent").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.reports_dropped").value()); + EXPECT_EQ(1U, stats_.counter("tracing.datadog.reports_failed").value()); + } +} + +TEST_F(DatadogDriverTest, CancelInflightRequestsOnDestruction) { + setupValidDriver(); + + StrictMock request1(&cm_.async_client_), + request2(&cm_.async_client_), request3(&cm_.async_client_), request4(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + const absl::optional timeout(std::chrono::seconds(1)); + + // Expect 4 separate report requests to be made. + EXPECT_CALL(cm_.async_client_, + send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout))) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request1))) + .WillOnce(Return(&request2)) + .WillOnce(Return(&request3)) + .WillOnce(Return(&request4)); + // Expect timer to be re-enabled on each tick. + EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(900), _)).Times(4); + + // Trigger 1st report request. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + // Trigger 2nd report request. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + // Trigger 3rd report request. 
+ driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + // Trigger 4th report request. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + timer_->invokeCallback(); + + Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "404"}}})); + // Simulate completion of the 2nd report request. + callback->onSuccess(request2, std::move(msg)); + + // Simulate failure of the 3rd report request. + callback->onFailure(request3, Http::AsyncClient::FailureReason::Reset); + + // Expect 1st and 4th requests to be cancelled on destruction. + EXPECT_CALL(request1, cancel()); + EXPECT_CALL(request4, cancel()); + + // Trigger destruction. + driver_.reset(); +} + } // namespace } // namespace Datadog } // namespace Tracers diff --git a/test/extensions/tracers/dynamic_ot/config_test.cc b/test/extensions/tracers/dynamic_ot/config_test.cc index edfa6049363d..44b1ba3dbfcb 100644 --- a/test/extensions/tracers/dynamic_ot/config_test.cc +++ b/test/extensions/tracers/dynamic_ot/config_test.cc @@ -45,7 +45,7 @@ TEST(DynamicOtTracerConfigTest, DynamicOpentracingHttpTracer) { DynamicOpenTracingTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - const Tracing::HttpTracerPtr tracer = factory.createHttpTracer(*message, context); + const Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, tracer); } diff --git a/test/extensions/tracers/lightstep/BUILD b/test/extensions/tracers/lightstep/BUILD index 4abc6edc9418..0d9ea91ba326 100644 --- a/test/extensions/tracers/lightstep/BUILD +++ b/test/extensions/tracers/lightstep/BUILD @@ -23,7 +23,6 @@ envoy_extension_cc_test( 
"//source/common/http:headers_lib", "//source/common/http:message_lib", "//source/common/runtime:runtime_lib", - "//source/common/runtime:uuid_util_lib", "//source/common/stats:fake_symbol_table_lib", "//source/extensions/tracers/lightstep:lightstep_tracer_lib", "//test/mocks/http:http_mocks", diff --git a/test/extensions/tracers/lightstep/config_test.cc b/test/extensions/tracers/lightstep/config_test.cc index be2c9f9c5e5a..866c31530631 100644 --- a/test/extensions/tracers/lightstep/config_test.cc +++ b/test/extensions/tracers/lightstep/config_test.cc @@ -41,7 +41,7 @@ TEST(LightstepTracerConfigTest, LightstepHttpTracer) { LightstepTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr lightstep_tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr lightstep_tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, lightstep_tracer); } diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 9a2e6b013058..da83c6c09910 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -11,7 +11,6 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" #include "common/stats/fake_symbol_table_impl.h" #include "common/tracing/http_tracer_impl.h" @@ -33,11 +32,13 @@ using testing::_; using testing::AtLeast; +using testing::DoAll; using testing::Eq; using testing::Invoke; using testing::NiceMock; using testing::Return; using testing::ReturnRef; +using testing::WithArg; namespace Envoy { namespace Extensions { @@ -71,6 +72,7 @@ class LightStepDriverTest : public testing::Test { opts->access_token = "sample_token"; 
opts->component_name = "component"; + cm_.thread_local_cluster_.cluster_.info_->name_ = "fake_cluster"; ON_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) .WillByDefault(ReturnRef(cm_.async_client_)); @@ -187,6 +189,47 @@ TEST_F(LightStepDriverTest, InitializeDriver) { } } +TEST_F(LightStepDriverTest, DeferredTlsInitialization) { + EXPECT_CALL(cm_, get(Eq("fake_cluster"))).WillRepeatedly(Return(&cm_.thread_local_cluster_)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()) + .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2)); + + const std::string yaml_string = R"EOF( + collector_cluster: fake_cluster + )EOF"; + envoy::config::trace::v3::LightstepConfig lightstep_config; + TestUtility::loadFromYaml(yaml_string, lightstep_config); + + std::unique_ptr opts(new lightstep::LightStepTracerOptions()); + opts->access_token = "sample_token"; + opts->component_name = "component"; + + ON_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillByDefault(ReturnRef(cm_.async_client_)); + + auto propagation_mode = Common::Ot::OpenTracingDriver::PropagationMode::TracerNative; + + tls_.defer_data = true; + driver_ = std::make_unique(lightstep_config, cm_, stats_, tls_, runtime_, + std::move(opts), propagation_mode, grpc_context_); + tls_.call(); +} + +TEST_F(LightStepDriverTest, AllowCollectorClusterToBeAddedViaApi) { + EXPECT_CALL(cm_, get(Eq("fake_cluster"))).WillRepeatedly(Return(&cm_.thread_local_cluster_)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()) + .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true)); + + const std::string yaml_string = R"EOF( + collector_cluster: fake_cluster + )EOF"; + envoy::config::trace::v3::LightstepConfig lightstep_config; + TestUtility::loadFromYaml(yaml_string, lightstep_config); + + setup(lightstep_config, true); +} + TEST_F(LightStepDriverTest, FlushSeveralSpans) { 
setupValidDriver(2); @@ -229,7 +272,7 @@ TEST_F(LightStepDriverTest, FlushSeveralSpans) { start_time_, {Tracing::Reason::Sampling, true}); third_span->finishSpan(); - callback->onSuccess(makeSuccessResponse()); + callback->onSuccess(request, makeSuccessResponse()); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("grpc.lightstep.collector.CollectorService.Report.success") @@ -239,6 +282,123 @@ TEST_F(LightStepDriverTest, FlushSeveralSpans) { .counter("grpc.lightstep.collector.CollectorService.Report.total") .value()); EXPECT_EQ(2U, stats_.counter("tracing.lightstep.spans_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); +} + +TEST_F(LightStepDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) { + Upstream::ClusterUpdateCallbacks* cluster_update_callbacks; + EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_)) + .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks), Return(nullptr))); + + setupValidDriver(1); + + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.lightstep.request_timeout", 5000U)) + .WillRepeatedly(Return(5000U)); + + // Verify the effect of onClusterAddOrUpdate()/onClusterRemoval() on reporting logic, + // keeping in mind that they will be called both for relevant and irrelevant clusters. + + { + // Simulate removal of the relevant cluster. + cluster_update_callbacks->onClusterRemoval("fake_cluster"); + + // Verify that no report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0); + EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + driver_->flush(); + + // Verify observability. 
+ EXPECT_EQ(1U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.spans_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.spans_dropped").value()); + } + + { + // Simulate addition of an irrelevant cluster. + NiceMock unrelated_cluster; + unrelated_cluster.cluster_.info_->name_ = "unrelated_cluster"; + cluster_update_callbacks->onClusterAddOrUpdate(unrelated_cluster); + + // Verify that no report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0); + EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + driver_->flush(); + + // Verify observability. + EXPECT_EQ(2U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.spans_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.spans_dropped").value()); + } + + { + // Simulate addition of the relevant cluster. + cluster_update_callbacks->onClusterAddOrUpdate(cm_.thread_local_cluster_); + + // Verify that report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillOnce(ReturnRef(cm_.async_client_)); + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + EXPECT_CALL(cm_.async_client_, send_(_, _, _)) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request))); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + driver_->flush(); + + // Complete in-flight request. + callback->onFailure(request, Http::AsyncClient::FailureReason::Reset); + + // Verify observability. 
+ EXPECT_EQ(2U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.spans_sent").value()); + EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_dropped").value()); + } + + { + // Simulate removal of an irrelevant cluster. + cluster_update_callbacks->onClusterRemoval("unrelated_cluster"); + + // Verify that report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillOnce(ReturnRef(cm_.async_client_)); + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + EXPECT_CALL(cm_.async_client_, send_(_, _, _)) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request))); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + driver_->flush(); + + // Complete in-flight request. + Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); + callback->onSuccess(request, std::move(msg)); + + // Verify observability. 
+ EXPECT_EQ(2U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); + EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_sent").value()); + EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_dropped").value()); + } } TEST_F(LightStepDriverTest, FlushOneFailure) { @@ -277,7 +437,7 @@ TEST_F(LightStepDriverTest, FlushOneFailure) { second_span->finishSpan(); - callback->onFailure(Http::AsyncClient::FailureReason::Reset); + callback->onFailure(request, Http::AsyncClient::FailureReason::Reset); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("grpc.lightstep.collector.CollectorService.Report.failure") @@ -286,6 +446,7 @@ TEST_F(LightStepDriverTest, FlushOneFailure) { .counter("grpc.lightstep.collector.CollectorService.Report.total") .value()); EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); } TEST_F(LightStepDriverTest, FlushWithActiveReport) { @@ -327,6 +488,7 @@ TEST_F(LightStepDriverTest, FlushWithActiveReport) { driver_->flush(); EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); EXPECT_CALL(request, cancel()); @@ -375,6 +537,7 @@ TEST_F(LightStepDriverTest, OnFullWithActiveReport) { ->finishSpan(); EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); EXPECT_CALL(request, cancel()); @@ -411,10 +574,11 @@ TEST_F(LightStepDriverTest, FlushSpansTimer) { timer_->invokeCallback(); - callback->onSuccess(makeSuccessResponse()); + callback->onSuccess(request, makeSuccessResponse()); EXPECT_EQ(1U, stats_.counter("tracing.lightstep.timer_flushed").value()); EXPECT_EQ(1U, stats_.counter("tracing.lightstep.spans_sent").value()); + EXPECT_EQ(0U, 
stats_.counter("tracing.lightstep.reports_skipped_no_cluster").value()); } TEST_F(LightStepDriverTest, CancelRequestOnDestruction) { @@ -494,6 +658,44 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { } } +TEST_F(LightStepDriverTest, MultiplePropagationModes) { + const std::string yaml_string = R"EOF( + collector_cluster: fake_cluster + propagation_modes: + - ENVOY + - LIGHTSTEP + - B3 + - TRACE_CONTEXT + )EOF"; + envoy::config::trace::v3::LightstepConfig lightstep_config; + TestUtility::loadFromYaml(yaml_string, lightstep_config); + + EXPECT_CALL(cm_, get(Eq("fake_cluster"))).WillRepeatedly(Return(&cm_.thread_local_cluster_)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()) + .WillByDefault(Return(Upstream::ClusterInfo::Features::HTTP2)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.lightstep.flush_interval_ms", _)) + .Times(AtLeast(1)) + .WillRepeatedly(Return(1000)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.lightstep.min_flush_spans", + LightStepDriver::DefaultMinFlushSpans)) + .Times(AtLeast(1)) + .WillRepeatedly(Return(1)); + + setup(lightstep_config, true); + + Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + + EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + span->injectContext(request_headers_); + EXPECT_TRUE(request_headers_.has("x-ot-span-context")); + EXPECT_TRUE(request_headers_.has("ot-tracer-traceid")); + EXPECT_TRUE(request_headers_.has("x-b3-traceid")); + EXPECT_TRUE(request_headers_.has("traceparent")); +} + TEST_F(LightStepDriverTest, SpawnChild) { setupValidDriver(); diff --git a/test/extensions/tracers/opencensus/config_test.cc b/test/extensions/tracers/opencensus/config_test.cc index 1b95e1c81969..a84f89e7861f 100644 --- a/test/extensions/tracers/opencensus/config_test.cc +++ b/test/extensions/tracers/opencensus/config_test.cc @@ -16,6 +16,52 @@ namespace Extensions { namespace Tracers { 
namespace OpenCensus { +TEST(OpenCensusTracerConfigTest, InvalidStackdriverConfiguration) { + NiceMock context; + OpenCensusTracerFactory factory; + + const std::string yaml_string = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + stackdriver_exporter_enabled: true + stackdriver_grpc_service: + envoy_grpc: + cluster_name: stackdriver + )EOF"; + envoy::config::trace::v3::Tracing configuration; + TestUtility::loadFromYaml(yaml_string, configuration); + + auto message = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException, + "Opencensus stackdriver tracer only support GoogleGrpc."); +} + +TEST(OpenCensusTracerConfigTest, InvalidOcagentConfiguration) { + NiceMock context; + OpenCensusTracerFactory factory; + + const std::string yaml_string = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + ocagent_exporter_enabled: true + ocagent_grpc_service: + envoy_grpc: + cluster_name: opencensus + )EOF"; + envoy::config::trace::v3::Tracing configuration; + TestUtility::loadFromYaml(yaml_string, configuration); + + auto message = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException, + "Opencensus ocagent tracer only supports GoogleGrpc."); +} + TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracer) { NiceMock context; const std::string yaml_string = R"EOF( @@ -29,7 +75,7 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracer) { OpenCensusTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), 
factory); - Tracing::HttpTracerPtr tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, tracer); } @@ -67,7 +113,7 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerWithTypedConfig) { OpenCensusTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, tracer); // Reset TraceParams back to default. @@ -107,7 +153,7 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerGrpc) { OpenCensusTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, tracer); // Reset TraceParams back to default. 
@@ -115,6 +161,142 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerGrpc) { {32, 32, 128, 32, ::opencensus::trace::ProbabilitySampler(1e-4)}); } +TEST(OpenCensusTracerConfigTest, ShouldCreateAtMostOneOpenCensusTracer) { + NiceMock context; + OpenCensusTracerFactory factory; + + const std::string yaml_string = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + trace_config: + rate_limiting_sampler: + qps: 123 + )EOF"; + envoy::config::trace::v3::Tracing configuration; + TestUtility::loadFromYaml(yaml_string, configuration); + + auto message_one = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + Tracing::HttpTracerSharedPtr tracer_one = factory.createHttpTracer(*message_one, context); + EXPECT_NE(nullptr, tracer_one); + + auto message_two = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context); + // Verify that no new tracer has been created. + EXPECT_EQ(tracer_two, tracer_one); +} + +TEST(OpenCensusTracerConfigTest, ShouldCacheFirstCreatedTracerUsingStrongReference) { + NiceMock context; + OpenCensusTracerFactory factory; + + const std::string yaml_string = R"EOF( + http: + name: envoy.tracers.opencensus + )EOF"; + envoy::config::trace::v3::Tracing configuration; + TestUtility::loadFromYaml(yaml_string, configuration); + + auto message_one = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + std::weak_ptr tracer_one = factory.createHttpTracer(*message_one, context); + // Verify that tracer factory keeps a strong reference. 
+ EXPECT_NE(nullptr, tracer_one.lock()); + + auto message_two = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context); + EXPECT_NE(nullptr, tracer_two); + // Verify that no new tracer has been created. + EXPECT_EQ(tracer_two, tracer_one.lock()); +} + +TEST(OpenCensusTracerConfigTest, ShouldNotCacheInvalidConfiguration) { + NiceMock context; + OpenCensusTracerFactory factory; + + const std::string yaml_one = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + ocagent_exporter_enabled: true + ocagent_grpc_service: + envoy_grpc: + cluster_name: opencensus + )EOF"; + envoy::config::trace::v3::Tracing configuration_one; + TestUtility::loadFromYaml(yaml_one, configuration_one); + + auto message_one = Config::Utility::translateToFactoryConfig( + configuration_one.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message_one, context)), EnvoyException, + "Opencensus ocagent tracer only supports GoogleGrpc."); + + const std::string yaml_two = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + ocagent_exporter_enabled: true + ocagent_grpc_service: + google_grpc: + target_uri: 127.0.0.1:55678 + stat_prefix: test + )EOF"; + envoy::config::trace::v3::Tracing configuration_two; + TestUtility::loadFromYaml(yaml_two, configuration_two); + + auto message_two = Config::Utility::translateToFactoryConfig( + configuration_two.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context); + // Verify that a new tracer has been created despite an earlier failed attempt. 
+ EXPECT_NE(nullptr, tracer_two); +} + +TEST(OpenCensusTracerConfigTest, ShouldRejectSubsequentCreateAttemptsWithDifferentConfig) { + NiceMock context; + OpenCensusTracerFactory factory; + + const std::string yaml_one = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + trace_config: + rate_limiting_sampler: + qps: 123 + )EOF"; + envoy::config::trace::v3::Tracing configuration_one; + TestUtility::loadFromYaml(yaml_one, configuration_one); + + auto message_one = Config::Utility::translateToFactoryConfig( + configuration_one.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + Tracing::HttpTracerSharedPtr tracer_one = factory.createHttpTracer(*message_one, context); + EXPECT_NE(nullptr, tracer_one); + + const std::string yaml_two = R"EOF( + http: + name: envoy.tracers.opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + trace_config: + rate_limiting_sampler: + qps: 321 + )EOF"; + envoy::config::trace::v3::Tracing configuration_two; + TestUtility::loadFromYaml(yaml_two, configuration_two); + + auto message_two = Config::Utility::translateToFactoryConfig( + configuration_two.http(), ProtobufMessage::getStrictValidationVisitor(), factory); + // Verify that OpenCensus is only configured once in a lifetime. 
+ EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message_two, context)), EnvoyException, + "Opencensus has already been configured with a different config."); +} + TEST(OpenCensusTracerConfigTest, DoubleRegistrationTest) { EXPECT_THROW_WITH_MESSAGE( (Registry::RegisterFactory()), diff --git a/test/extensions/tracers/xray/config_test.cc b/test/extensions/tracers/xray/config_test.cc index 992c6bc06921..be281bf47281 100644 --- a/test/extensions/tracers/xray/config_test.cc +++ b/test/extensions/tracers/xray/config_test.cc @@ -41,7 +41,7 @@ TEST(XRayTracerConfigTest, XRayHttpTracerWithTypedConfig) { XRayTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr xray_tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr xray_tracer = factory.createHttpTracer(*message, context); ASSERT_NE(nullptr, xray_tracer); } @@ -76,7 +76,7 @@ TEST(XRayTracerConfigTest, XRayHttpTracerWithInvalidFileName) { auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr xray_tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr xray_tracer = factory.createHttpTracer(*message, context); ASSERT_NE(nullptr, xray_tracer); } diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 6c15c69d6821..ae0dc558cb2d 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -107,7 +107,7 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { daemon::Segment s; MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); ASSERT_STREQ(expected_parent_id.c_str(), s.parent_id().c_str()); - ASSERT_STREQ(expected_operation_name, s.name().c_str()); + ASSERT_STREQ(expected_span_name, s.name().c_str()); 
ASSERT_STREQ(xray_parent_span->traceId().c_str(), s.trace_id().c_str()); ASSERT_STRNE(xray_parent_span->Id().c_str(), s.id().c_str()); }; @@ -145,9 +145,9 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { span->injectContext(request_headers); auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_NE(header, nullptr); - ASSERT_NE(header->value().getStringView().find("root="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("parent="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("sampled=1"), absl::string_view::npos); + ASSERT_NE(header->value().getStringView().find("Root="), absl::string_view::npos); + ASSERT_NE(header->value().getStringView().find("Parent="), absl::string_view::npos); + ASSERT_NE(header->value().getStringView().find("Sampled=1"), absl::string_view::npos); } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { @@ -158,9 +158,9 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { span->injectContext(request_headers); auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_NE(header, nullptr); - ASSERT_NE(header->value().getStringView().find("root="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("parent="), absl::string_view::npos); - ASSERT_NE(header->value().getStringView().find("sampled=0"), absl::string_view::npos); + ASSERT_NE(header->value().getStringView().find("Root="), absl::string_view::npos); + ASSERT_NE(header->value().getStringView().find("Parent="), absl::string_view::npos); + ASSERT_NE(header->value().getStringView().find("Sampled=0"), absl::string_view::npos); } TEST_F(XRayTracerTest, TraceIDFormatTest) { diff --git a/test/extensions/tracers/xray/xray_tracer_impl_test.cc b/test/extensions/tracers/xray/xray_tracer_impl_test.cc index 992158a80f4b..81f9532cb4a8 100644 --- a/test/extensions/tracers/xray/xray_tracer_impl_test.cc +++ 
b/test/extensions/tracers/xray/xray_tracer_impl_test.cc @@ -13,8 +13,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using ::testing::ReturnRef; - namespace Envoy { namespace Extensions { namespace Tracers { @@ -92,21 +90,6 @@ TEST_F(XRayDriverTest, NoXRayTracerHeader) { ASSERT_NE(span, nullptr); } -TEST_F(XRayDriverTest, EmptySegmentNameDefaultToClusterName) { - const std::string cluster_name = "FooBar"; - EXPECT_CALL(context_.server_factory_context_.local_info_, clusterName()) - .WillRepeatedly(ReturnRef(cluster_name)); - XRayConfiguration config{"" /*daemon_endpoint*/, "", "" /*sampling_rules*/}; - Driver driver(config, context_); - - Tracing::Decision tracing_decision{Tracing::Reason::Sampling, true /*sampled*/}; - Envoy::SystemTime start_time; - auto span = driver.startSpan(tracing_config_, request_headers_, operation_name_, start_time, - tracing_decision); - auto* xray_span = static_cast(span.get()); - ASSERT_STREQ(xray_span->name().c_str(), cluster_name.c_str()); -} - } // namespace } // namespace XRay } // namespace Tracers diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index f1556abfe95e..4e63c419febd 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -40,7 +40,7 @@ TEST(ZipkinTracerConfigTest, ZipkinHttpTracer) { ZipkinTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); - Tracing::HttpTracerPtr zipkin_tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr zipkin_tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, zipkin_tracer); } @@ -67,7 +67,7 @@ TEST(ZipkinTracerConfigTest, ZipkinHttpTracerWithTypedConfig) { ZipkinTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), 
factory); - Tracing::HttpTracerPtr zipkin_tracer = factory.createHttpTracer(*message, context); + Tracing::HttpTracerSharedPtr zipkin_tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, zipkin_tracer); } diff --git a/test/extensions/tracers/zipkin/zipkin_core_types_test.cc b/test/extensions/tracers/zipkin/zipkin_core_types_test.cc index 1b7d66ebb1ee..97ef5254db05 100644 --- a/test/extensions/tracers/zipkin/zipkin_core_types_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_core_types_test.cc @@ -71,7 +71,7 @@ TEST(ZipkinCoreTypesEndpointTest, copyOperator) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:3306"); Endpoint ep1(std::string("my_service"), addr); - Endpoint ep2(ep1); + const Endpoint& ep2(ep1); EXPECT_EQ("my_service", ep1.serviceName()); EXPECT_TRUE(TestUtility::protoEqual( @@ -86,7 +86,7 @@ TEST(ZipkinCoreTypesEndpointTest, assignmentOperator) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:3306"); Endpoint ep1(std::string("my_service"), addr); - Endpoint ep2 = ep1; + const Endpoint& ep2 = ep1; EXPECT_EQ("my_service", ep1.serviceName()); EXPECT_TRUE(TestUtility::protoEqual( @@ -194,7 +194,7 @@ TEST(ZipkinCoreTypesAnnotationTest, copyConstructor) { test_time.timeSystem().systemTime().time_since_epoch()) .count(); Annotation ann(timestamp, CLIENT_SEND, ep); - Annotation ann2(ann); + const Annotation& ann2(ann); EXPECT_EQ(ann.value(), ann2.value()); EXPECT_EQ(ann.timestamp(), ann2.timestamp()); @@ -212,7 +212,7 @@ TEST(ZipkinCoreTypesAnnotationTest, assignmentOperator) { test_time.timeSystem().systemTime().time_since_epoch()) .count(); Annotation ann(timestamp, CLIENT_SEND, ep); - Annotation ann2 = ann; + const Annotation& ann2 = ann; EXPECT_EQ(ann.value(), ann2.value()); EXPECT_EQ(ann.timestamp(), ann2.timestamp()); @@ -288,7 +288,7 @@ TEST(ZipkinCoreTypesBinaryAnnotationTest, customConstructor) { 
TEST(ZipkinCoreTypesBinaryAnnotationTest, copyConstructor) { BinaryAnnotation ann("key", "value"); - BinaryAnnotation ann2(ann); + const BinaryAnnotation& ann2(ann); EXPECT_EQ(ann.value(), ann2.value()); EXPECT_EQ(ann.key(), ann2.key()); @@ -299,7 +299,7 @@ TEST(ZipkinCoreTypesBinaryAnnotationTest, copyConstructor) { TEST(ZipkinCoreTypesBinaryAnnotationTest, assignmentOperator) { BinaryAnnotation ann("key", "value"); - BinaryAnnotation ann2 = ann; + const BinaryAnnotation& ann2 = ann; EXPECT_EQ(ann.value(), ann2.value()); EXPECT_EQ(ann.key(), ann2.key()); diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index a898fbb2e7e9..477d8ce6adef 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -10,7 +10,6 @@ #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/runtime/runtime_impl.h" -#include "common/runtime/uuid_util.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/zipkin/zipkin_core_constants.h" @@ -29,11 +28,14 @@ #include "gtest/gtest.h" using testing::_; +using testing::DoAll; using testing::Eq; using testing::Invoke; using testing::NiceMock; using testing::Return; using testing::ReturnRef; +using testing::StrictMock; +using testing::WithArg; namespace Envoy { namespace Extensions { @@ -46,6 +48,7 @@ class ZipkinDriverTest : public testing::Test { ZipkinDriverTest() : time_source_(test_time_.timeSystem()) {} void setup(envoy::config::trace::v3::ZipkinConfig& zipkin_config, bool init_timer) { + cm_.thread_local_cluster_.cluster_.info_->name_ = "fake_cluster"; ON_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) .WillByDefault(ReturnRef(cm_.async_client_)); @@ -111,14 +114,15 @@ class ZipkinDriverTest : public testing::Test { Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new 
Http::TestResponseHeaderMapImpl{{":status", "202"}}})); - callback->onSuccess(std::move(msg)); + callback->onSuccess(request, std::move(msg)); EXPECT_EQ(2U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_skipped_no_cluster").value()); EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_sent").value()); EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_failed").value()); - callback->onFailure(Http::AsyncClient::FailureReason::Reset); + callback->onFailure(request, Http::AsyncClient::FailureReason::Reset); EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_failed").value()); } @@ -158,7 +162,7 @@ TEST_F(ZipkinDriverTest, InitializeDriver) { } { - // Valid config but not valid cluster. + // Valid config but collector cluster doesn't exists. EXPECT_CALL(cm_, get(Eq("fake_cluster"))).WillOnce(Return(nullptr)); const std::string yaml_string = R"EOF( collector_cluster: fake_cluster @@ -186,6 +190,21 @@ TEST_F(ZipkinDriverTest, InitializeDriver) { } } +TEST_F(ZipkinDriverTest, AllowCollectorClusterToBeAddedViaApi) { + EXPECT_CALL(cm_, get(Eq("fake_cluster"))).WillRepeatedly(Return(&cm_.thread_local_cluster_)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, features()).WillByDefault(Return(0)); + ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, addedViaApi()).WillByDefault(Return(true)); + + const std::string yaml_string = R"EOF( + collector_cluster: fake_cluster + collector_endpoint: /api/v1/spans + )EOF"; + envoy::config::trace::v3::ZipkinConfig zipkin_config; + TestUtility::loadFromYaml(yaml_string, zipkin_config); + + setup(zipkin_config, true); +} + TEST_F(ZipkinDriverTest, FlushSeveralSpans) { expectValidFlushSeveralSpans("HTTP_JSON_V1", "application/json"); } @@ -236,14 +255,197 @@ TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "404"}}})); 
// AsyncClient can fail with valid HTTP headers - callback->onSuccess(std::move(msg)); + callback->onSuccess(request, std::move(msg)); EXPECT_EQ(1U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_skipped_no_cluster").value()); EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_sent").value()); EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_dropped").value()); EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_failed").value()); } +TEST_F(ZipkinDriverTest, SkipReportIfCollectorClusterHasBeenRemoved) { + Upstream::ClusterUpdateCallbacks* cluster_update_callbacks; + EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_)) + .WillOnce(DoAll(SaveArgAddress(&cluster_update_callbacks), Return(nullptr))); + + setupValidDriver("HTTP_JSON_V1"); + + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.min_flush_spans", 5)) + .WillRepeatedly(Return(1)); + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.request_timeout", 5000U)) + .WillRepeatedly(Return(5000U)); + + // Verify the effect of onClusterAddOrUpdate()/onClusterRemoval() on reporting logic, + // keeping in mind that they will be called both for relevant and irrelevant clusters. + + { + // Simulate removal of the relevant cluster. + cluster_update_callbacks->onClusterRemoval("fake_cluster"); + + // Verify that no report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0); + EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + + // Verify observability. 
+ EXPECT_EQ(1U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_failed").value()); + } + + { + // Simulate addition of an irrelevant cluster. + NiceMock unrelated_cluster; + unrelated_cluster.cluster_.info_->name_ = "unrelated_cluster"; + cluster_update_callbacks->onClusterAddOrUpdate(unrelated_cluster); + + // Verify that no report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster(_)).Times(0); + EXPECT_CALL(cm_.async_client_, send_(_, _, _)).Times(0); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + + // Verify observability. + EXPECT_EQ(2U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(2U, stats_.counter("tracing.zipkin.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_failed").value()); + } + + { + // Simulate addition of the relevant cluster. + cluster_update_callbacks->onClusterAddOrUpdate(cm_.thread_local_cluster_); + + // Verify that report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillOnce(ReturnRef(cm_.async_client_)); + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + EXPECT_CALL(cm_.async_client_, send_(_, _, _)) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request))); + + // Trigger flush of a span. 
+ driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + + // Complete in-flight request. + callback->onFailure(request, Http::AsyncClient::FailureReason::Reset); + + // Verify observability. + EXPECT_EQ(3U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(2U, stats_.counter("tracing.zipkin.reports_skipped_no_cluster").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); + EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_failed").value()); + } + + { + // Simulate removal of an irrelevant cluster. + cluster_update_callbacks->onClusterRemoval("unrelated_cluster"); + + // Verify that report will be sent. + EXPECT_CALL(cm_, httpAsyncClientForCluster("fake_cluster")) + .WillOnce(ReturnRef(cm_.async_client_)); + Http::MockAsyncClientRequest request(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + EXPECT_CALL(cm_.async_client_, send_(_, _, _)) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request))); + + // Trigger flush of a span. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + + // Complete in-flight request. + Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "202"}}})); + callback->onSuccess(request, std::move(msg)); + + // Verify observability. 
+ EXPECT_EQ(4U, stats_.counter("tracing.zipkin.spans_sent").value()); + EXPECT_EQ(2U, stats_.counter("tracing.zipkin.reports_skipped_no_cluster").value()); + EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_sent").value()); + EXPECT_EQ(0U, stats_.counter("tracing.zipkin.reports_dropped").value()); + EXPECT_EQ(1U, stats_.counter("tracing.zipkin.reports_failed").value()); + } +} + +TEST_F(ZipkinDriverTest, CancelInflightRequestsOnDestruction) { + setupValidDriver("HTTP_JSON_V1"); + + StrictMock request1(&cm_.async_client_), + request2(&cm_.async_client_), request3(&cm_.async_client_), request4(&cm_.async_client_); + Http::AsyncClient::Callbacks* callback{}; + const absl::optional timeout(std::chrono::seconds(5)); + + // Expect 4 separate report requests to be made. + EXPECT_CALL(cm_.async_client_, + send_(_, _, Http::AsyncClient::RequestOptions().setTimeout(timeout))) + .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&callback)), Return(&request1))) + .WillOnce(Return(&request2)) + .WillOnce(Return(&request3)) + .WillOnce(Return(&request4)); + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.min_flush_spans", 5)) + .Times(4) + .WillRepeatedly(Return(1)); + EXPECT_CALL(runtime_.snapshot_, getInteger("tracing.zipkin.request_timeout", 5000U)) + .Times(4) + .WillRepeatedly(Return(5000U)); + + // Trigger 1st report request. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + // Trigger 2nd report request. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + // Trigger 3rd report request. + driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + // Trigger 4th report request. 
+ driver_ + ->startSpan(config_, request_headers_, operation_name_, start_time_, + {Tracing::Reason::Sampling, true}) + ->finishSpan(); + + Http::ResponseMessagePtr msg(new Http::ResponseMessageImpl( + Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "404"}}})); + + // Simulate completion of the 2nd report request. + callback->onSuccess(request2, std::move(msg)); + + // Simulate failure of the 3rd report request. + callback->onFailure(request3, Http::AsyncClient::FailureReason::Reset); + + // Expect 1st and 4th requests to be cancelled on destruction. + EXPECT_CALL(request1, cancel()); + EXPECT_CALL(request4, cancel()); + + // Trigger destruction. + driver_.reset(); +} + TEST_F(ZipkinDriverTest, FlushSpansTimer) { setupValidDriver("HTTP_JSON_V1"); diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index 39189c817016..587fd3b8e490 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -165,7 +165,7 @@ TEST_P(AltsIntegrationTestValidPeer, RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = [this]() -> Network::ClientConnectionPtr { return makeAltsConnection(); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } class AltsIntegrationTestEmptyPeer : public AltsIntegrationTestBase { @@ -186,7 +186,7 @@ TEST_P(AltsIntegrationTestEmptyPeer, RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = [this]() -> Network::ClientConnectionPtr { return makeAltsConnection(); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } class AltsIntegrationTestClientInvalidPeer : public AltsIntegrationTestBase { diff --git 
a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 7de2b82e2065..2b6c67057c28 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -588,7 +588,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { NiceMock cluster_manager; NiceMock init_manager; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); - EXPECT_CALL(factory_context_, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); // EXPECT_CALL(factory_context_, random()).WillOnce(ReturnRef(random)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, clusterManager()).WillOnce(ReturnRef(cluster_manager)); @@ -705,6 +705,75 @@ TEST_F(SslServerContextImplTicketTest, VerifySanWithNoCA) { "is insecure and not allowed"); } +TEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionEnabledByDefault) { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + filename: "{{ test_tmpdir }}/unittestkey.pem" + )EOF"; + TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context); + + ServerContextConfigImpl server_context_config(tls_context, factory_context_); + EXPECT_FALSE(server_context_config.disableStatelessSessionResumption()); +} + +TEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionExplicitlyEnabled) { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + 
filename: "{{ test_tmpdir }}/unittestkey.pem" + disable_stateless_session_resumption: false + )EOF"; + TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context); + + ServerContextConfigImpl server_context_config(tls_context, factory_context_); + EXPECT_FALSE(server_context_config.disableStatelessSessionResumption()); +} + +TEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionDisabled) { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + filename: "{{ test_tmpdir }}/unittestkey.pem" + disable_stateless_session_resumption: true + )EOF"; + TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context); + + ServerContextConfigImpl server_context_config(tls_context, factory_context_); + EXPECT_TRUE(server_context_config.disableStatelessSessionResumption()); +} + +TEST_F(SslServerContextImplTicketTest, StatelessSessionResumptionEnabledWhenKeyIsConfigured) { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + const std::string tls_context_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + filename: "{{ test_tmpdir }}/unittestkey.pem" + session_ticket_keys: + keys: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a" +)EOF"; + TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context); + + ServerContextConfigImpl server_context_config(tls_context, factory_context_); + EXPECT_FALSE(server_context_config.disableStatelessSessionResumption()); +} + class ClientContextConfigImplTest : public SslCertsTest {}; // Validate that empty SNI (according to C string rules) fails config validation. 
@@ -932,7 +1001,7 @@ TEST_F(ClientContextConfigImplTest, SecretNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); sds_secret_configs->set_name("abc.com"); @@ -964,7 +1033,7 @@ TEST_F(ClientContextConfigImplTest, ValidationContextNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); sds_secret_configs->set_name("abc.com"); @@ -1270,7 +1339,7 @@ TEST_F(ServerContextConfigImplTest, SecretNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); sds_secret_configs->set_name("abc.com"); @@ -1302,7 +1371,7 @@ TEST_F(ServerContextConfigImplTest, ValidationContextNotReady) { 
EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); sds_secret_configs->set_name("abc.com"); diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index bf55f47d034e..bd755736ef45 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -89,7 +89,7 @@ TEST_P(SslIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; - testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, &creator); + testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, false, &creator); checkStats(); } @@ -97,7 +97,7 @@ TEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -108,7 +108,7 @@ TEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBufferHttp2) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(ClientSslTransportOptions().setAlpn(true)); }; - testRouterRequestAndResponseWithBody(1024, 512, false, 
&creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -116,7 +116,7 @@ TEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBufferVerifySAN) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(ClientSslTransportOptions().setSan(true)); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -125,7 +125,7 @@ TEST_P(SslIntegrationTest, RouterRequestAndResponseWithBodyNoBufferHttp2VerifySA ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(ClientSslTransportOptions().setAlpn(true).setSan(true)); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -244,7 +244,7 @@ TEST_P(SslCertficateIntegrationTest, ServerRsa) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -255,7 +255,7 @@ TEST_P(SslCertficateIntegrationTest, ServerEcdsa) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -266,7 +266,7 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaEcdsa) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -277,7 
+277,7 @@ TEST_P(SslCertficateIntegrationTest, ClientRsaOnly) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(rsaOnlyClientOptions()); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -302,7 +302,7 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaEcdsaClientRsaOnly) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(rsaOnlyClientOptions()); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -329,7 +329,7 @@ TEST_P(SslCertficateIntegrationTest, ServerEcdsaClientEcdsaOnly) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(ecdsaOnlyClientOptions()); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -341,7 +341,7 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaEcdsaClientEcdsaOnly) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(ecdsaOnlyClientOptions()); }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); } @@ -550,7 +550,7 @@ TEST_P(SslTapIntegrationTest, RequestWithTextProto) { return makeSslClientConnection({}); }; const uint64_t id = Network::ConnectionImpl::nextGlobalIdForTest() + 1; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkStats(); codec_client_->close(); test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); @@ -576,7 +576,7 @@ 
TEST_P(SslTapIntegrationTest, RequestWithJsonBodyAsStringUpstreamTap) { return makeSslClientConnection({}); }; const uint64_t id = Network::ConnectionImpl::nextGlobalIdForTest() + 2; - testRouterRequestAndResponseWithBody(512, 1024, false, &creator); + testRouterRequestAndResponseWithBody(512, 1024, false, false, &creator); checkStats(); codec_client_->close(); test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); diff --git a/test/extensions/transport_sockets/tls/ssl_certs_test.h b/test/extensions/transport_sockets/tls/ssl_certs_test.h index 16c1b7922517..d6d450ca748d 100644 --- a/test/extensions/transport_sockets/tls/ssl_certs_test.h +++ b/test/extensions/transport_sockets/tls/ssl_certs_test.h @@ -11,7 +11,7 @@ using testing::ReturnRef; namespace Envoy { class SslCertsTest : public testing::Test { public: - static void SetUpTestSuite() { + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) TestEnvironment::exec({TestEnvironment::runfilesPath( "test/extensions/transport_sockets/tls/gen_unittest_certs.sh")}); } diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 0abe39e7b970..7009310a53de 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -264,7 +264,7 @@ class TestUtilOptions : public TestUtilOptionsBase { void testUtil(const TestUtilOptions& options) { Event::SimulatedTimeSystem time_system; - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system); testing::NiceMock server_factory_context; @@ -305,7 +305,7 @@ void testUtil(const TestUtilOptions& options) { TestUtility::loadFromYaml(TestEnvironment::substitute(options.clientCtxYaml()), client_tls_context); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore 
client_stats_store; Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system); testing::NiceMock client_factory_context; @@ -320,10 +320,12 @@ void testUtil(const TestUtilOptions& options) { client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::ConnectionPtr server_connection; Network::MockConnectionCallbacks server_connection_callbacks; + StreamInfo::MockStreamInfo stream_info; EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = dispatcher->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -569,7 +571,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { const auto& filter_chain = options.listener().filter_chains(0); std::vector server_names(filter_chain.filter_chain_match().server_names().begin(), filter_chain.filter_chain_match().server_names().end()); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system); testing::NiceMock server_factory_context; @@ -587,7 +589,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { Network::MockConnectionHandler connection_handler; Network::ListenerPtr listener = dispatcher->createListener(socket, callbacks, true); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore client_stats_store; Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system); testing::NiceMock client_factory_context; @@ -616,6 +618,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { Network::ConnectionPtr server_connection; Network::MockConnectionCallbacks server_connection_callbacks; + 
StreamInfo::MockStreamInfo stream_info; EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { std::string sni = options.transportSocketOptions() != nullptr && @@ -624,7 +627,8 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { : options.clientCtxProto().sni(); socket->setRequestedServerName(sni); server_connection = dispatcher->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -770,13 +774,14 @@ void configureServerAndExpiredClientCertificate( class SslSocketTest : public SslCertsTest, public testing::WithParamInterface { protected: - SslSocketTest() : dispatcher_(api_->allocateDispatcher()) {} + SslSocketTest() : dispatcher_(api_->allocateDispatcher()), stream_info_(api_->timeSource()) {} void testClientSessionResumption(const std::string& server_ctx_yaml, const std::string& client_ctx_yaml, bool expect_reuse, const Network::Address::IpVersion version); Event::DispatcherPtr dispatcher_; + StreamInfo::StreamInfoImpl stream_info_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, SslSocketTest, @@ -2335,7 +2340,7 @@ TEST_P(SslSocketTest, FlushCloseDuringHandshake) { TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), tls_context); auto server_cfg = std::make_unique(tls_context, factory_context_); ContextManagerImpl manager(time_system_); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); @@ -2357,7 +2362,8 @@ TEST_P(SslSocketTest, FlushCloseDuringHandshake) { EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = 
dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); Buffer::OwnedImpl data("hello"); server_connection->write(data, false); @@ -2390,7 +2396,7 @@ TEST_P(SslSocketTest, HalfClose) { TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_tls_context); auto server_cfg = std::make_unique(server_tls_context, factory_context_); ContextManagerImpl manager(time_system_); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); @@ -2409,7 +2415,7 @@ TEST_P(SslSocketTest, HalfClose) { envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), tls_context); auto client_cfg = std::make_unique(tls_context, factory_context_); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore client_stats_store; ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( @@ -2426,7 +2432,8 @@ TEST_P(SslSocketTest, HalfClose) { EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info_); server_connection->enableHalfClose(true); server_connection->addReadFilter(server_read_filter); server_connection->addConnectionCallbacks(server_connection_callbacks); @@ -2471,7 
+2478,7 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_tls_context); auto server_cfg = std::make_unique(server_tls_context, factory_context_); ContextManagerImpl manager(time_system_); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); @@ -2493,7 +2500,7 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), tls_context); auto client_cfg = std::make_unique(tls_context, factory_context_); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore client_stats_store; ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -2519,7 +2526,8 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -2549,7 +2557,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, Event::SimulatedTimeSystem time_system; ContextManagerImpl manager(*time_system); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system); testing::NiceMock 
server_factory_context; @@ -2582,7 +2590,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_tls_context); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore client_stats_store; Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system); testing::NiceMock client_factory_context; @@ -2601,13 +2609,14 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, SSL_SESSION* ssl_session = nullptr; Network::ConnectionPtr server_connection; + StreamInfo::StreamInfoImpl stream_info(time_system); EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::TransportSocketFactory& tsf = socket->localAddress() == socket1->localAddress() ? server_ssl_socket_factory1 : server_ssl_socket_factory2; - server_connection = dispatcher->createServerConnection(std::move(socket), - tsf.createTransportSocket(nullptr)); + server_connection = dispatcher->createServerConnection( + std::move(socket), tsf.createTransportSocket(nullptr), stream_info); })); EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)) @@ -2643,13 +2652,14 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, client_connection->connect(); Network::MockConnectionCallbacks server_connection_callbacks; + StreamInfo::StreamInfoImpl stream_info2(time_system); EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::TransportSocketFactory& tsf = socket->localAddress() == socket1->localAddress() ? 
server_ssl_socket_factory1 : server_ssl_socket_factory2; - server_connection = dispatcher->createServerConnection(std::move(socket), - tsf.createTransportSocket(nullptr)); + server_connection = dispatcher->createServerConnection( + std::move(socket), tsf.createTransportSocket(nullptr), stream_info2); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -2685,6 +2695,83 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, EXPECT_EQ(expect_reuse ? 1UL : 0UL, client_stats_store.counter("ssl.session_reused").value()); } +void testSupportForStatelessSessionResumption(const std::string& server_ctx_yaml, + const std::string& client_ctx_yaml, + bool expect_support, + const Network::Address::IpVersion ip_version) { + Event::SimulatedTimeSystem time_system; + ContextManagerImpl manager(*time_system); + + Stats::IsolatedStoreImpl server_stats_store; + Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system); + testing::NiceMock + server_factory_context; + ON_CALL(server_factory_context, api()).WillByDefault(ReturnRef(*server_api)); + + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext server_tls_context; + TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), server_tls_context); + auto server_cfg = + std::make_unique(server_tls_context, server_factory_context); + + ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, + server_stats_store, {}); + auto tcp_socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true); + NiceMock callbacks; + Network::MockConnectionHandler connection_handler; + Event::DispatcherPtr dispatcher(server_api->allocateDispatcher()); + Network::ListenerPtr listener = dispatcher->createListener(tcp_socket, callbacks, true); + + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context; + 
TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_tls_context); + + Stats::IsolatedStoreImpl client_stats_store; + Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system); + testing::NiceMock + client_factory_context; + ON_CALL(client_factory_context, api()).WillByDefault(ReturnRef(*client_api)); + + auto client_cfg = + std::make_unique(client_tls_context, client_factory_context); + ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); + Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( + tcp_socket->localAddress(), Network::Address::InstanceConstSharedPtr(), + ssl_socket_factory.createTransportSocket(nullptr), nullptr); + + Network::MockConnectionCallbacks client_connection_callbacks; + client_connection->addConnectionCallbacks(client_connection_callbacks); + client_connection->connect(); + + StreamInfo::StreamInfoImpl stream_info(time_system); + Network::ConnectionPtr server_connection; + EXPECT_CALL(callbacks, onAccept_(_)) + .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { + server_connection = dispatcher->createServerConnection( + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info); + + const SslSocketInfo* ssl_socket = + dynamic_cast(server_connection->ssl().get()); + SSL* server_ssl_socket = ssl_socket->rawSslForTest(); + SSL_CTX* server_ssl_context = SSL_get_SSL_CTX(server_ssl_socket); + if (expect_support) { + EXPECT_EQ(0, (SSL_CTX_get_options(server_ssl_context) & SSL_OP_NO_TICKET)); + } else { + EXPECT_EQ(SSL_OP_NO_TICKET, (SSL_CTX_get_options(server_ssl_context) & SSL_OP_NO_TICKET)); + } + })); + + EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)) + .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { + client_connection->close(Network::ConnectionCloseType::NoFlush); + 
server_connection->close(Network::ConnectionCloseType::NoFlush); + dispatcher->exit(); + })); + + EXPECT_CALL(client_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + dispatcher->run(Event::Dispatcher::RunType::Block); +} + } // namespace TEST_P(SslSocketTest, TicketSessionResumption) { @@ -2971,6 +3058,59 @@ TEST_P(SslSocketTest, TicketSessionResumptionDifferentServerCertDifferentSAN) { GetParam()); } +TEST_P(SslSocketTest, StatelessSessionResumptionDisabled) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + filename: "{{ test_tmpdir }}/unittestkey.pem" + disable_stateless_session_resumption: true +)EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + )EOF"; + + testSupportForStatelessSessionResumption(server_ctx_yaml, client_ctx_yaml, false, GetParam()); +} + +TEST_P(SslSocketTest, SatelessSessionResumptionEnabledExplicitly) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + filename: "{{ test_tmpdir }}/unittestkey.pem" + disable_stateless_session_resumption: false +)EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + )EOF"; + + testSupportForStatelessSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam()); +} + +TEST_P(SslSocketTest, StatelessSessionResumptionEnabledByDefault) { + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_tmpdir }}/unittestcert.pem" + private_key: + filename: "{{ test_tmpdir }}/unittestkey.pem" +)EOF"; + + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + )EOF"; + + testSupportForStatelessSessionResumption(server_ctx_yaml, client_ctx_yaml, true, GetParam()); +} + // Test that if two listeners use the same cert and 
session ticket key, but // different client CA, that sessions cannot be resumed. TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { @@ -3010,7 +3150,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { TestUtility::loadFromYaml(TestEnvironment::substitute(server2_ctx_yaml), tls_context2); auto server2_cfg = std::make_unique(tls_context2, factory_context_); ContextManagerImpl manager(time_system_); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); ServerSslSocketFactory server2_ssl_socket_factory(std::move(server2_cfg), manager, @@ -3037,7 +3177,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), tls_context); auto client_cfg = std::make_unique(tls_context, factory_context_); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore client_stats_store; ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -3055,8 +3195,8 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { Network::TransportSocketFactory& tsf = accepted_socket->localAddress() == socket->localAddress() ? 
server_ssl_socket_factory : server2_ssl_socket_factory; - server_connection = dispatcher_->createServerConnection(std::move(accepted_socket), - tsf.createTransportSocket(nullptr)); + server_connection = dispatcher_->createServerConnection( + std::move(accepted_socket), tsf.createTransportSocket(nullptr), stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -3095,8 +3235,8 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { Network::TransportSocketFactory& tsf = accepted_socket->localAddress() == socket->localAddress() ? server_ssl_socket_factory : server2_ssl_socket_factory; - server_connection = dispatcher_->createServerConnection(std::move(accepted_socket), - tsf.createTransportSocket(nullptr)); + server_connection = dispatcher_->createServerConnection( + std::move(accepted_socket), tsf.createTransportSocket(nullptr), stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); })); EXPECT_CALL(server_connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose)); @@ -3118,7 +3258,7 @@ void SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya ContextManagerImpl manager(time_system_); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; Api::ApiPtr server_api = Api::createApiForTest(server_stats_store, time_system_); testing::NiceMock server_factory_context; @@ -3145,7 +3285,7 @@ void SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_ctx_proto; TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_ctx_proto); - Stats::IsolatedStoreImpl client_stats_store; + Stats::TestUtil::TestStore client_stats_store; Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system_); testing::NiceMock client_factory_context; @@ -3181,7 +3321,8 @@ void 
SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = dispatcher->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -3226,7 +3367,8 @@ void SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = dispatcher->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -3387,7 +3529,7 @@ TEST_P(SslSocketTest, SslError) { TestUtility::loadFromYaml(TestEnvironment::substitute(server_ctx_yaml), tls_context); auto server_cfg = std::make_unique(tls_context, factory_context_); ContextManagerImpl manager(time_system_); - Stats::IsolatedStoreImpl server_stats_store; + Stats::TestUtil::TestStore server_stats_store; ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); @@ -3409,7 +3551,8 @@ TEST_P(SslSocketTest, SslError) { EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), + stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -4059,12 +4202,12 @@ TEST_P(SslSocketTest, 
OverrideApplicationProtocols) { // Validate that if downstream secrets are not yet downloaded from SDS server, Envoy creates // NotReadySslSocket object to handle downstream connection. TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { - Stats::IsolatedStoreImpl stats_store; + Stats::TestUtil::TestStore stats_store; NiceMock local_info; testing::NiceMock factory_context; NiceMock init_manager; NiceMock dispatcher; - EXPECT_CALL(factory_context, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); EXPECT_CALL(factory_context, initManager()).WillRepeatedly(Return(&init_manager)); @@ -4095,7 +4238,7 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { // Validate that if upstream secrets are not yet downloaded from SDS server, Envoy creates // NotReadySslSocket object to handle upstream connection. 
TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { - Stats::IsolatedStoreImpl stats_store; + Stats::TestUtil::TestStore stats_store; NiceMock local_info; testing::NiceMock factory_context; NiceMock init_manager; @@ -4103,7 +4246,7 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); EXPECT_CALL(factory_context, initManager()).WillRepeatedly(Return(&init_manager)); - EXPECT_CALL(factory_context, dispatcher()).WillOnce(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; auto sds_secret_configs = @@ -4154,7 +4297,7 @@ class SslReadBufferLimitTest : public SslSocketTest { socket_->localAddress(), source_address_, std::move(transport_socket), nullptr); client_connection_->addConnectionCallbacks(client_callbacks_); client_connection_->connect(); - read_filter_.reset(new Network::MockReadFilter()); + read_filter_ = std::make_shared(); } void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size, @@ -4164,7 +4307,8 @@ class SslReadBufferLimitTest : public SslSocketTest { EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr), + stream_info_); server_connection_->setBufferLimits(read_buffer_limit); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); @@ -4243,7 +4387,8 @@ class SslReadBufferLimitTest : public SslSocketTest { EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { 
server_connection_ = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr), + stream_info_); server_connection_->setBufferLimits(read_buffer_limit); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); @@ -4282,8 +4427,8 @@ class SslReadBufferLimitTest : public SslSocketTest { dispatcher_->run(Event::Dispatcher::RunType::Block); } - Stats::IsolatedStoreImpl server_stats_store_; - Stats::IsolatedStoreImpl client_stats_store_; + Stats::TestUtil::TestStore server_stats_store_; + Stats::TestUtil::TestStore client_stats_store_; std::shared_ptr socket_; Network::MockListenerCallbacks listener_callbacks_; Network::MockConnectionHandler connection_handler_; @@ -4362,7 +4507,8 @@ TEST_P(SslReadBufferLimitTest, TestBind) { EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr), + stream_info_); server_connection_->addConnectionCallbacks(server_callbacks_); server_connection_->addReadFilter(read_filter_); EXPECT_EQ("", server_connection_->nextProtocol()); @@ -4391,7 +4537,8 @@ TEST_P(SslReadBufferLimitTest, SmallReadsIntoSameSlice) { EXPECT_CALL(listener_callbacks_, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { server_connection_ = dispatcher_->createServerConnection( - std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr)); + std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr), + stream_info_); server_connection_->setBufferLimits(read_buffer_limit); server_connection_->addConnectionCallbacks(server_callbacks_); 
server_connection_->addReadFilter(read_filter_); @@ -4409,7 +4556,7 @@ TEST_P(SslReadBufferLimitTest, SmallReadsIntoSameSlice) { EXPECT_CALL(*read_filter_, onData(_, _)) .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) -> Network::FilterStatus { EXPECT_GE(expected_chunk_size, data.length()); - EXPECT_EQ(1, data.getRawSlices(nullptr, 0)); + EXPECT_EQ(1, data.getRawSlices().size()); filter_seen += data.length(); data.drain(data.length()); if (filter_seen == (write_size * num_writes)) { diff --git a/test/extensions/wasm/BUILD b/test/extensions/wasm/BUILD index ce9cd67f854c..32d98b270cc8 100644 --- a/test/extensions/wasm/BUILD +++ b/test/extensions/wasm/BUILD @@ -51,7 +51,7 @@ envoy_extension_cc_test( "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", - "@envoy_api//envoy/extensions/wasm/v3:pkg_cc_proto", + "@envoy_api//envoy/config/wasm/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/wasm/config_test.cc b/test/extensions/wasm/config_test.cc index f56318e63ddd..46a2d604f0a8 100644 --- a/test/extensions/wasm/config_test.cc +++ b/test/extensions/wasm/config_test.cc @@ -1,6 +1,6 @@ #include -#include "envoy/extensions/wasm/v3/wasm.pb.validate.h" +#include "envoy/config/wasm/v3/wasm.pb.validate.h" #include "envoy/registry/registry.h" #include "common/stats/isolated_store_impl.h" @@ -36,7 +36,7 @@ TEST_P(WasmFactoryTest, CreateWasmFromWASM) { auto factory = Registry::FactoryRegistry::getFactory("envoy.wasm"); ASSERT_NE(factory, nullptr); - envoy::extensions::wasm::v3::WasmService config; + envoy::config::wasm::v3::WasmService config; config.mutable_config()->mutable_vm_config()->set_runtime( absl::StrCat("envoy.wasm.runtime.", GetParam())); config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( @@ -65,7 +65,7 @@ TEST_P(WasmFactoryTest, CreateWasmFromWASMPerThread) { auto factory = Registry::FactoryRegistry::getFactory("envoy.wasm"); ASSERT_NE(factory, 
nullptr); - envoy::extensions::wasm::v3::WasmService config; + envoy::config::wasm::v3::WasmService config; config.mutable_config()->mutable_vm_config()->set_runtime( absl::StrCat("envoy.wasm.runtime.", GetParam())); config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( @@ -94,7 +94,7 @@ TEST_P(WasmFactoryTest, MissingImport) { auto factory = Registry::FactoryRegistry::getFactory("envoy.wasm"); ASSERT_NE(factory, nullptr); - envoy::extensions::wasm::v3::WasmService config; + envoy::config::wasm::v3::WasmService config; config.mutable_config()->mutable_vm_config()->set_runtime( absl::StrCat("envoy.wasm.runtime.", GetParam())); config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( @@ -116,7 +116,7 @@ TEST_P(WasmFactoryTest, UnspecifiedRuntime) { auto factory = Registry::FactoryRegistry::getFactory("envoy.wasm"); ASSERT_NE(factory, nullptr); - envoy::extensions::wasm::v3::WasmService config; + envoy::config::wasm::v3::WasmService config; config.mutable_config()->mutable_vm_config()->set_runtime(""); config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( TestEnvironment::substitute( @@ -137,7 +137,7 @@ TEST_P(WasmFactoryTest, UnknownRuntime) { auto factory = Registry::FactoryRegistry::getFactory("envoy.wasm"); ASSERT_NE(factory, nullptr); - envoy::extensions::wasm::v3::WasmService config; + envoy::config::wasm::v3::WasmService config; config.mutable_config()->mutable_vm_config()->set_runtime("envoy.wasm.runtime.invalid"); config.mutable_config()->mutable_vm_config()->mutable_code()->mutable_local()->set_filename( TestEnvironment::substitute( diff --git a/test/extensions/wasm/test_data/stats_cpp.cc b/test/extensions/wasm/test_data/stats_cpp.cc index 5f5e919766cf..901131b2deb4 100644 --- a/test/extensions/wasm/test_data/stats_cpp.cc +++ b/test/extensions/wasm/test_data/stats_cpp.cc @@ -28,7 +28,7 @@ extern "C" PROXY_WASM_KEEPALIVE uint32_t 
proxy_on_vm_start(uint32_t, uint32_t) { logInfo(std::string("get counter = ") + std::to_string(value)); CHECK_RESULT(getMetric(g, &value)); logWarn(std::string("get gauge = ") + std::to_string(value)); - // Get on histograms is not suppoorted. + // Get on histograms is not supported. if (getMetric(h, &value) != WasmResult::Ok) { logError(std::string("get histogram = Unsupported")); } diff --git a/test/fuzz/fuzz_runner.cc b/test/fuzz/fuzz_runner.cc index eb44a14050e3..508f1e922c43 100644 --- a/test/fuzz/fuzz_runner.cc +++ b/test/fuzz/fuzz_runner.cc @@ -9,6 +9,8 @@ #include "test/test_common/environment.h" +#include "gmock/gmock.h" + namespace Envoy { namespace Fuzz { @@ -50,7 +52,13 @@ void Runner::setupEnvironment(int argc, char** argv, spdlog::level::level_enum d } // namespace Fuzz } // namespace Envoy -extern "C" int LLVMFuzzerInitialize(int* /*argc*/, char*** argv) { +// LLVMFuzzerInitialize() is called by LibFuzzer once before fuzzing starts. +// NOLINTNEXTLINE(readability-identifier-naming) +extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) { + // Before parsing gmock flags, set the default value of flag --gmock_verbose to "error". + // This suppresses logs from NiceMock objects, which can be noisy and provide little value. 
+ testing::GMOCK_FLAG(verbose) = "error"; + testing::InitGoogleMock(argc, *argv); Envoy::Fuzz::Runner::setupEnvironment(1, *argv, spdlog::level::critical); return 0; } diff --git a/test/fuzz/fuzz_runner.h b/test/fuzz/fuzz_runner.h index e2279365cab6..31a317a220c0 100644 --- a/test/fuzz/fuzz_runner.h +++ b/test/fuzz/fuzz_runner.h @@ -8,7 +8,7 @@ #include "libprotobuf_mutator/src/libfuzzer/libfuzzer_macro.h" // Bring in FuzzedDataProvider, see // https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider -#include "compiler_rt/fuzzer/utils/FuzzedDataProvider.h" +#include "fuzzer/utils/FuzzedDataProvider.h" #include "spdlog/spdlog.h" namespace Envoy { diff --git a/test/fuzz/main.cc b/test/fuzz/main.cc index 749c38600ab2..98e30e63cbb8 100644 --- a/test/fuzz/main.cc +++ b/test/fuzz/main.cc @@ -26,6 +26,7 @@ #endif #include "gtest/gtest.h" +#include "gmock/gmock.h" namespace Envoy { namespace { @@ -90,6 +91,7 @@ int main(int argc, char** argv) { } testing::InitGoogleTest(&argc, argv); + testing::InitGoogleMock(&argc, argv); Envoy::Fuzz::Runner::setupEnvironment(argc, argv, spdlog::level::info); return RUN_ALL_TESTS(); diff --git a/test/integration/BUILD b/test/integration/BUILD index d6001a833cae..00185a5762af 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -91,7 +91,6 @@ py_binary( name = "capture_fuzz_gen", srcs = ["capture_fuzz_gen.py"], licenses = ["notice"], # Apache 2 - python_version = "PY3", visibility = ["//visibility:public"], deps = [ ":capture_fuzz_proto_py_proto", @@ -152,12 +151,13 @@ envoy_cc_test( name = "filter_manager_integration_test", srcs = [ "filter_manager_integration_test.cc", - "filter_manager_integration_test.h", ], deps = [ ":filter_manager_integration_proto_cc_proto", ":http_integration_lib", ":integration_lib", + "//source/extensions/filters/listener/original_dst:config", + "//source/extensions/filters/listener/tls_inspector:config", 
"//source/extensions/filters/network/common:factory_base_lib", "//source/extensions/filters/network/echo:config", "//source/extensions/filters/network/tcp_proxy:config", @@ -633,8 +633,9 @@ envoy_cc_test( srcs = [ "echo_integration_test.cc", ], - # This test must be run in exclusive mode: see comments in AddRemoveListener - tags = ["exclusive"], + # Uncomment this line to run this test repeatedly in exclusive mode if not using docker-sandbox, + # or RBE, see comments in AddRemoveListener. + # tags = ["exclusive"], deps = [ ":integration_lib", "//source/extensions/filters/network/echo:config", @@ -1098,3 +1099,34 @@ envoy_cc_test( "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "listener_filter_integration_test", + srcs = [ + "listener_filter_integration_test.cc", + ], + data = [ + "//test/config/integration/certs", + ], + deps = [ + ":integration_lib", + "//source/common/config:api_version_lib", + "//source/common/event:dispatcher_includes", + "//source/common/event:dispatcher_lib", + "//source/common/network:utility_lib", + "//source/extensions/access_loggers/file:config", + "//source/extensions/filters/listener/tls_inspector:config", + "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", + "//source/extensions/filters/network/echo:config", + "//source/extensions/transport_sockets/tls:config", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//source/extensions/transport_sockets/tls:context_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/secret:secret_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", + ], +) diff --git a/test/integration/api_listener_integration_test.cc b/test/integration/api_listener_integration_test.cc 
index 47c9a7faf5c5..f00a7bd1fe0d 100644 --- a/test/integration/api_listener_integration_test.cc +++ b/test/integration/api_listener_integration_test.cc @@ -16,7 +16,7 @@ namespace { class ApiListenerIntegrationTest : public BaseIntegrationTest, public testing::TestWithParam { public: - ApiListenerIntegrationTest() : BaseIntegrationTest(GetParam(), bootstrap_config()) { + ApiListenerIntegrationTest() : BaseIntegrationTest(GetParam(), bootstrapConfig()) { use_lds_ = false; autonomous_upstream_ = true; } @@ -28,7 +28,7 @@ class ApiListenerIntegrationTest : public BaseIntegrationTest, // Thus, the ApiListener has to be added in addition to the already existing listener in the // config. bootstrap.mutable_static_resources()->add_listeners()->MergeFrom( - Server::parseListenerFromV2Yaml(api_listener_config())); + Server::parseListenerFromV2Yaml(apiListenerConfig())); }); } @@ -37,15 +37,15 @@ class ApiListenerIntegrationTest : public BaseIntegrationTest, fake_upstreams_.clear(); } - static std::string bootstrap_config() { + static std::string bootstrapConfig() { // At least one empty filter chain needs to be specified. 
- return ConfigHelper::BASE_CONFIG + R"EOF( + return absl::StrCat(ConfigHelper::baseConfig(), R"EOF( filter_chains: filters: - )EOF"; + )EOF"); } - static std::string api_listener_config() { + static std::string apiListenerConfig() { return R"EOF( name: api_listener address: diff --git a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index 6e31f652e42b..d7681cfd4e98 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -112,8 +112,12 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, std::string actual_type_url; const char ads_v2_sotw_endpoint[] = "/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources"; - const char ads_v3_delta_endpoint[] = + const char ads_v3_sotw_endpoint[] = "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources"; + const char ads_v2_delta_endpoint[] = + "/envoy.service.discovery.v2.AggregatedDiscoveryService/DeltaAggregatedResources"; + const char ads_v3_delta_endpoint[] = + "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources"; switch (transportApiVersion()) { case envoy::config::core::v3::ApiVersion::AUTO: case envoy::config::core::v3::ApiVersion::V2: { @@ -133,7 +137,7 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, EXPECT_TRUE(!hasHiddenEnvoyDeprecated(delta_discovery_request)); xds_stream_->startGrpcStream(); actual_type_url = delta_discovery_request.type_url(); - expected_endpoint = expected_v2_delta_endpoint; + expected_endpoint = ads() ? 
ads_v2_delta_endpoint : expected_v2_delta_endpoint; break; } case envoy::config::core::v3::ApiConfigSource::REST: { @@ -158,7 +162,7 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request)); EXPECT_TRUE(!hasHiddenEnvoyDeprecated(discovery_request)); actual_type_url = discovery_request.type_url(); - expected_endpoint = ads() ? ads_v3_delta_endpoint : expected_v3_sotw_endpoint; + expected_endpoint = ads() ? ads_v3_sotw_endpoint : expected_v3_sotw_endpoint; break; } case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { @@ -167,7 +171,7 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_discovery_request)); EXPECT_TRUE(!hasHiddenEnvoyDeprecated(delta_discovery_request)); actual_type_url = delta_discovery_request.type_url(); - expected_endpoint = expected_v3_delta_endpoint; + expected_endpoint = ads() ? ads_v3_delta_endpoint : expected_v3_delta_endpoint; break; } case envoy::config::core::v3::ApiConfigSource::REST: { @@ -258,7 +262,8 @@ INSTANTIATE_TEST_SUITE_P( AdsApiConfigSourcesExplicitApiVersions, ApiVersionIntegrationTest, testing::Combine(testing::Values(TestEnvironment::getIpVersionsForTest()[0]), testing::Values(true), - testing::Values(envoy::config::core::v3::ApiConfigSource::GRPC), + testing::Values(envoy::config::core::v3::ApiConfigSource::GRPC, + envoy::config::core::v3::ApiConfigSource::DELTA_GRPC), testing::Values(envoy::config::core::v3::ApiVersion::V2, envoy::config::core::v3::ApiVersion::V3), testing::Values(envoy::config::core::v3::ApiVersion::V2, diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index a7724a654044..70c14b75de7d 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -66,7 +66,8 @@ AutonomousHttpConnection::AutonomousHttpConnection(SharedConnectionWrapper& shar 
Stats::Store& store, Type type, AutonomousUpstream& upstream) : FakeHttpConnection(shared_connection, store, type, upstream.timeSystem(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW), upstream_(upstream) {} Http::RequestDecoder& AutonomousHttpConnection::newStream(Http::ResponseEncoder& response_encoder, diff --git a/test/integration/cluster_filter_integration_test.cc b/test/integration/cluster_filter_integration_test.cc index 3dc24508237c..fa90e19b2795 100644 --- a/test/integration/cluster_filter_integration_test.cc +++ b/test/integration/cluster_filter_integration_test.cc @@ -81,7 +81,7 @@ class ClusterFilterIntegrationTest : public testing::TestWithParamaddCopy(Http::LowerCaseString("x-served-by"), parent_.connection().localAddress()->asString()); } + parent_.connection().dispatcher().post([this, headers_copy, end_stream]() -> void { encoder_.encodeHeaders(*headers_copy, end_stream); }); @@ -107,7 +108,7 @@ void FakeStream::encodeData(uint64_t size, bool end_stream) { } void FakeStream::encodeData(Buffer::Instance& data, bool end_stream) { - std::shared_ptr data_copy(new Buffer::OwnedImpl(data)); + std::shared_ptr data_copy = std::make_shared(data); parent_.connection().dispatcher().post( [this, data_copy, end_stream]() -> void { encoder_.encodeData(*data_copy, end_stream); }); } @@ -220,11 +221,12 @@ void FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { Http::TestHeaderMapImpl{{"grpc-status", std::to_string(static_cast(status))}}); } -FakeHttpConnection::FakeHttpConnection(SharedConnectionWrapper& shared_connection, - Stats::Store& store, Type type, - Event::TestTimeSystem& time_system, - uint32_t max_request_headers_kb, - uint32_t max_request_headers_count) +FakeHttpConnection::FakeHttpConnection( + SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, + 
Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, + uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) : FakeConnectionBase(shared_connection, time_system) { if (type == Type::HTTP1) { Http::Http1Settings http1_settings; @@ -232,7 +234,7 @@ FakeHttpConnection::FakeHttpConnection(SharedConnectionWrapper& shared_connectio http1_settings.enable_trailers_ = true; codec_ = std::make_unique( shared_connection_.connection(), store, *this, http1_settings, max_request_headers_kb, - max_request_headers_count); + max_request_headers_count, headers_with_underscores_action); } else { envoy::config::core::v3::Http2ProtocolOptions http2_options = ::Envoy::Http2::Utility::initializeAndValidateOptions( @@ -241,7 +243,7 @@ FakeHttpConnection::FakeHttpConnection(SharedConnectionWrapper& shared_connectio http2_options.set_allow_metadata(true); codec_ = std::make_unique( shared_connection_.connection(), *this, store, http2_options, max_request_headers_kb, - max_request_headers_count); + max_request_headers_count, headers_with_underscores_action); ASSERT(type == Type::HTTP2); } @@ -472,11 +474,11 @@ void FakeUpstream::threadRoutine() { } } -AssertionResult FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, - FakeHttpConnectionPtr& connection, - milliseconds timeout, - uint32_t max_request_headers_kb, - uint32_t max_request_headers_count) { +AssertionResult FakeUpstream::waitForHttpConnection( + Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, milliseconds timeout, + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) { Event::TestTimeSystem& time_system = timeSystem(); auto end_time = time_system.monotonicTime() + timeout; { @@ -495,9 +497,9 @@ AssertionResult 
FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_di if (new_connections_.empty()) { return AssertionFailure() << "Got a new connection event, but didn't create a connection."; } - connection = std::make_unique(consumeConnection(), stats_store_, http_type_, - time_system, max_request_headers_kb, - max_request_headers_count); + connection = std::make_unique( + consumeConnection(), stats_store_, http_type_, time_system, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); @@ -528,7 +530,7 @@ FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, connection = std::make_unique( upstream.consumeConnection(), upstream.stats_store_, upstream.http_type_, upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - Http::DEFAULT_MAX_HEADERS_COUNT); + Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); lock.release(); VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 548b39b5e729..26379a3d31f4 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -69,6 +69,9 @@ class FakeStream : public Http::RequestDecoder, void setAddServedByHeader(bool add_header) { add_served_by_header_ = add_header; } const Http::RequestTrailerMapPtr& trailers() { return trailers_; } bool receivedData() { return received_data_; } + Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() { + return encoder_.http1StreamEncoderOptions(); + } ABSL_MUST_USE_RESULT testing::AssertionResult @@ -419,7 +422,9 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo FakeHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, Event::TestTimeSystem& time_system, uint32_t 
max_request_headers_kb, - uint32_t max_request_headers_count); + uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); // By default waitForNewStream assumes the next event is a new stream and // returns AssertionFailure if an unexpected event occurs. If a caller truly @@ -560,11 +565,13 @@ class FakeUpstream : Logger::Loggable, // Returns the new connection via the connection argument. ABSL_MUST_USE_RESULT - testing::AssertionResult - waitForHttpConnection(Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, - std::chrono::milliseconds timeout = TestUtility::DefaultTimeout, - uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT); + testing::AssertionResult waitForHttpConnection( + Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout, + uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB, + uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW); ABSL_MUST_USE_RESULT testing::AssertionResult @@ -666,9 +673,7 @@ class FakeUpstream : Logger::Loggable, bool bindToPort() override { return true; } bool handOffRestoredDestinationConnections() const override { return false; } uint32_t perConnectionBufferLimitBytes() const override { return 0; } - std::chrono::milliseconds listenerFiltersTimeout() const override { - return std::chrono::milliseconds(); - } + std::chrono::milliseconds listenerFiltersTimeout() const override { return {}; } bool continueOnListenerFiltersTimeout() const override { return false; } Stats::Scope& listenerScope() override { return parent_.stats_store_; } uint64_t 
listenerTag() const override { return 0; } @@ -680,11 +685,15 @@ class FakeUpstream : Logger::Loggable, envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; } + const std::vector& accessLogs() const override { + return empty_access_logs_; + } FakeUpstream& parent_; const std::string name_; Network::NopConnectionBalancerImpl connection_balancer_; const Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; + const std::vector empty_access_logs_; }; void threadRoutine(); diff --git a/test/integration/filter_manager_integration_test.cc b/test/integration/filter_manager_integration_test.cc index d8830aad80c5..5a9dbbc52366 100644 --- a/test/integration/filter_manager_integration_test.cc +++ b/test/integration/filter_manager_integration_test.cc @@ -1,9 +1,18 @@ -#include "test/integration/filter_manager_integration_test.h" - #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/server/filter_config.h" + +#include "common/buffer/buffer_impl.h" +#include "extensions/filters/network/common/factory_base.h" + +#include "test/integration/filter_manager_integration_test.pb.h" +#include "test/integration/filter_manager_integration_test.pb.validate.h" #include "test/integration/http_integration.h" #include "test/integration/integration.h" #include "test/integration/utility.h" @@ -16,6 +25,254 @@ namespace Envoy { namespace { +/** + * Basic traffic throttler that emits a next chunk of the original request/response data + * on timer tick. 
+ */ +class Throttler { +public: + Throttler(Event::Dispatcher& dispatcher, std::chrono::milliseconds tick_interval, + uint64_t max_chunk_length, std::function next_chunk_cb) + : timer_(dispatcher.createTimer([this] { onTimerTick(); })), tick_interval_(tick_interval), + max_chunk_length_(max_chunk_length), next_chunk_cb_(next_chunk_cb) {} + + /** + * Throttle given given request/response data. + */ + void throttle(Buffer::Instance& data, bool end_stream); + /** + * Cancel any scheduled activities (on connection close). + */ + void reset(); + +private: + void onTimerTick(); + + Buffer::OwnedImpl buffer_{}; + bool end_stream_{}; + + const Event::TimerPtr timer_; + const std::chrono::milliseconds tick_interval_; + const uint64_t max_chunk_length_; + const std::function next_chunk_cb_; +}; + +void Throttler::throttle(Buffer::Instance& data, bool end_stream) { + buffer_.move(data); + end_stream_ |= end_stream; + if (!timer_->enabled()) { + timer_->enableTimer(tick_interval_); + } +} + +void Throttler::reset() { timer_->disableTimer(); } + +void Throttler::onTimerTick() { + Buffer::OwnedImpl next_chunk{}; + if (0 < buffer_.length()) { + auto chunk_length = max_chunk_length_ < buffer_.length() ? max_chunk_length_ : buffer_.length(); + next_chunk.move(buffer_, chunk_length); + } + bool end_stream = end_stream_ && 0 == buffer_.length(); + if (0 < buffer_.length()) { + timer_->enableTimer(tick_interval_); + } + next_chunk_cb_(next_chunk, end_stream); +} + +/** + * Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain() + * and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of a timer + * callback. + * + * Emits a next chunk of the original request/response data on timer tick. 
+ */ +class ThrottlerFilter : public Network::Filter, public Network::ConnectionCallbacks { +public: + ThrottlerFilter(std::chrono::milliseconds tick_interval, uint64_t max_chunk_length) + : tick_interval_(tick_interval), max_chunk_length_(max_chunk_length) {} + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override { + read_callbacks_ = &callbacks; + read_callbacks_->connection().addConnectionCallbacks(*this); + + read_throttler_ = std::make_unique( + read_callbacks_->connection().dispatcher(), tick_interval_, max_chunk_length_, + [this](Buffer::Instance& data, bool end_stream) { + read_callbacks_->injectReadDataToFilterChain(data, end_stream); + }); + } + + // Network::WriteFilter + Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override; + void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override { + write_callbacks_ = &callbacks; + + write_throttler_ = std::make_unique( + write_callbacks_->connection().dispatcher(), tick_interval_, max_chunk_length_, + [this](Buffer::Instance& data, bool end_stream) { + write_callbacks_->injectWriteDataToFilterChain(data, end_stream); + }); + } + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + +private: + Network::ReadFilterCallbacks* read_callbacks_{}; + Network::WriteFilterCallbacks* write_callbacks_{}; + + std::unique_ptr read_throttler_; + std::unique_ptr write_throttler_; + + const std::chrono::milliseconds tick_interval_; + const uint64_t max_chunk_length_; +}; + +// Network::ReadFilter +Network::FilterStatus ThrottlerFilter::onNewConnection() { return Network::FilterStatus::Continue; } + +Network::FilterStatus 
ThrottlerFilter::onData(Buffer::Instance& data, bool end_stream) { + read_throttler_->throttle(data, end_stream); + ASSERT(data.length() == 0); + return Network::FilterStatus::StopIteration; +} + +// Network::WriteFilter +Network::FilterStatus ThrottlerFilter::onWrite(Buffer::Instance& data, bool end_stream) { + write_throttler_->throttle(data, end_stream); + ASSERT(data.length() == 0); + return Network::FilterStatus::StopIteration; +} + +// Network::ConnectionCallbacks +void ThrottlerFilter::onEvent(Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + read_throttler_->reset(); + write_throttler_->reset(); + } +} + +/** + * Config factory for ThrottlerFilter. + */ +class ThrottlerFilterConfigFactory : public Extensions::NetworkFilters::Common::FactoryBase< + test::integration::filter_manager::Throttler> { +public: + explicit ThrottlerFilterConfigFactory(const std::string& name) : FactoryBase(name) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const test::integration::filter_manager::Throttler& proto_config, + Server::Configuration::FactoryContext&) override { + return [proto_config](Network::FilterManager& filter_manager) -> void { + filter_manager.addFilter(std::make_shared( + std::chrono::milliseconds(proto_config.tick_interval_ms()), + proto_config.max_chunk_length())); + }; + } +}; + +/** + * Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain() + * and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of + * ReadFilter::onData() and WriteFilter::onWrite(). + * + * Calls ReadFilterCallbacks::injectReadDataToFilterChain() / + * WriteFilterCallbacks::injectWriteDataToFilterChain() to pass data to the next filter + * byte-by-byte. 
+ */ +class DispenserFilter : public Network::Filter { +public: + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override { + read_callbacks_ = &callbacks; + } + + // Network::WriteFilter + Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override; + void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override { + write_callbacks_ = &callbacks; + } + +private: + // Pass data to the next filter byte-by-byte. + void dispense(Buffer::Instance& data, bool end_stream, + std::function next_chunk_cb); + + Network::ReadFilterCallbacks* read_callbacks_{}; + Network::WriteFilterCallbacks* write_callbacks_{}; +}; + +// Network::ReadFilter +Network::FilterStatus DispenserFilter::onNewConnection() { return Network::FilterStatus::Continue; } + +Network::FilterStatus DispenserFilter::onData(Buffer::Instance& data, bool end_stream) { + dispense(data, end_stream, [this](Buffer::Instance& data, bool end_stream) { + read_callbacks_->injectReadDataToFilterChain(data, end_stream); + }); + ASSERT(data.length() == 0); + return Network::FilterStatus::StopIteration; +} + +// Network::WriteFilter +Network::FilterStatus DispenserFilter::onWrite(Buffer::Instance& data, bool end_stream) { + dispense(data, end_stream, [this](Buffer::Instance& data, bool end_stream) { + write_callbacks_->injectWriteDataToFilterChain(data, end_stream); + }); + ASSERT(data.length() == 0); + return Network::FilterStatus::StopIteration; +} + +// Pass data to the next filter byte-by-byte. 
+void DispenserFilter::dispense(Buffer::Instance& data, bool end_stream, + std::function next_chunk_cb) { + Buffer::OwnedImpl next_chunk{}; + do { + if (0 < data.length()) { + next_chunk.move(data, 1); + } + next_chunk_cb(next_chunk, end_stream && 0 == data.length()); + next_chunk.drain(next_chunk.length()); + } while (0 < data.length()); +} + +/** + * Config factory for DispenserFilter. + */ +class DispenserFilterConfigFactory : public Server::Configuration::NamedNetworkFilterConfigFactory { +public: + explicit DispenserFilterConfigFactory(const std::string& name) : name_(name) {} + + // NamedNetworkFilterConfigFactory + Network::FilterFactoryCb + createFilterFactoryFromProto(const Protobuf::Message&, + Server::Configuration::FactoryContext&) override { + return [](Network::FilterManager& filter_manager) -> void { + filter_manager.addFilter(std::make_shared()); + }; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom per-filter empty config proto + // This is only allowed in tests. + return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + + std::string name() const override { return name_; } + +private: + const std::string name_; +}; + // Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain() // and WriteFilterCallbacks::injectWriteDataToFilterChain() methods outside of the context of // ReadFilter::onData() and WriteFilter::onWrite(), i.e. 
on timer event @@ -176,16 +433,16 @@ class InjectDataToFilterChainIntegrationTest */ class InjectDataWithEchoFilterIntegrationTest : public InjectDataToFilterChainIntegrationTest { public: - static std::string echo_config() { - return ConfigHelper::BASE_CONFIG + R"EOF( + static std::string echoConfig() { + return absl::StrCat(ConfigHelper::baseConfig(), R"EOF( filter_chains: filters: - name: envoy.filters.network.echo - )EOF"; + )EOF"); } InjectDataWithEchoFilterIntegrationTest() - : InjectDataToFilterChainIntegrationTest(echo_config()) {} + : InjectDataToFilterChainIntegrationTest(echoConfig()) {} }; INSTANTIATE_TEST_SUITE_P( @@ -204,13 +461,33 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, UsageOfInjectDataMethodsShouldBe tcp_client->close(); } +TEST_P(InjectDataWithEchoFilterIntegrationTest, FilterChainMismatch) { + useListenerAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->set_transport_protocol("tls"); + }); + initialize(); + + auto tcp_client = makeTcpConnection(lookupPort("listener_0")); + tcp_client->write("hello"); + + std::string access_log = + absl::StrCat("NR ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); + EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr(access_log)); + tcp_client->close(); +} + /** * Integration test with an auxiliary filter in front of "envoy.filters.network.tcp_proxy". 
*/ class InjectDataWithTcpProxyFilterIntegrationTest : public InjectDataToFilterChainIntegrationTest { public: InjectDataWithTcpProxyFilterIntegrationTest() - : InjectDataToFilterChainIntegrationTest(ConfigHelper::TCP_PROXY_CONFIG) {} + : InjectDataToFilterChainIntegrationTest(ConfigHelper::tcpProxyConfig()) {} }; INSTANTIATE_TEST_SUITE_P( diff --git a/test/integration/filter_manager_integration_test.h b/test/integration/filter_manager_integration_test.h deleted file mode 100644 index 829d64143492..000000000000 --- a/test/integration/filter_manager_integration_test.h +++ /dev/null @@ -1,268 +0,0 @@ -#pragma once - -#include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/network/connection.h" -#include "envoy/network/filter.h" -#include "envoy/server/filter_config.h" - -#include "common/buffer/buffer_impl.h" - -#include "extensions/filters/network/common/factory_base.h" - -#include "test/integration/filter_manager_integration_test.pb.h" -#include "test/integration/filter_manager_integration_test.pb.validate.h" - -namespace Envoy { -namespace { - -/** - * Basic traffic throttler that emits a next chunk of the original request/response data - * on timer tick. - */ -class Throttler { -public: - Throttler(Event::Dispatcher& dispatcher, std::chrono::milliseconds tick_interval, - uint64_t max_chunk_length, std::function next_chunk_cb) - : timer_(dispatcher.createTimer([this] { onTimerTick(); })), tick_interval_(tick_interval), - max_chunk_length_(max_chunk_length), next_chunk_cb_(next_chunk_cb) {} - - /** - * Throttle given given request/response data. - */ - void throttle(Buffer::Instance& data, bool end_stream); - /** - * Cancel any scheduled activities (on connection close). 
- */ - void reset(); - -private: - void onTimerTick(); - - Buffer::OwnedImpl buffer_{}; - bool end_stream_{}; - - const Event::TimerPtr timer_; - const std::chrono::milliseconds tick_interval_; - const uint64_t max_chunk_length_; - const std::function next_chunk_cb_; -}; - -void Throttler::throttle(Buffer::Instance& data, bool end_stream) { - buffer_.move(data); - end_stream_ |= end_stream; - if (!timer_->enabled()) { - timer_->enableTimer(tick_interval_); - } -} - -void Throttler::reset() { timer_->disableTimer(); } - -void Throttler::onTimerTick() { - Buffer::OwnedImpl next_chunk{}; - if (0 < buffer_.length()) { - auto chunk_length = max_chunk_length_ < buffer_.length() ? max_chunk_length_ : buffer_.length(); - next_chunk.move(buffer_, chunk_length); - } - bool end_stream = end_stream_ && 0 == buffer_.length(); - if (0 < buffer_.length()) { - timer_->enableTimer(tick_interval_); - } - next_chunk_cb_(next_chunk, end_stream); -} - -/** - * Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain() - * and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of a timer - * callback. - * - * Emits a next chunk of the original request/response data on timer tick. 
- */ -class ThrottlerFilter : public Network::Filter, public Network::ConnectionCallbacks { -public: - ThrottlerFilter(std::chrono::milliseconds tick_interval, uint64_t max_chunk_length) - : tick_interval_(tick_interval), max_chunk_length_(max_chunk_length) {} - - // Network::ReadFilter - Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; - Network::FilterStatus onNewConnection() override; - void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override { - read_callbacks_ = &callbacks; - read_callbacks_->connection().addConnectionCallbacks(*this); - - read_throttler_ = std::make_unique( - read_callbacks_->connection().dispatcher(), tick_interval_, max_chunk_length_, - [this](Buffer::Instance& data, bool end_stream) { - read_callbacks_->injectReadDataToFilterChain(data, end_stream); - }); - } - - // Network::WriteFilter - Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override; - void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override { - write_callbacks_ = &callbacks; - - write_throttler_ = std::make_unique( - write_callbacks_->connection().dispatcher(), tick_interval_, max_chunk_length_, - [this](Buffer::Instance& data, bool end_stream) { - write_callbacks_->injectWriteDataToFilterChain(data, end_stream); - }); - } - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - -private: - Network::ReadFilterCallbacks* read_callbacks_{}; - Network::WriteFilterCallbacks* write_callbacks_{}; - - std::unique_ptr read_throttler_; - std::unique_ptr write_throttler_; - - const std::chrono::milliseconds tick_interval_; - const uint64_t max_chunk_length_; -}; - -// Network::ReadFilter -Network::FilterStatus ThrottlerFilter::onNewConnection() { return Network::FilterStatus::Continue; } - -Network::FilterStatus 
ThrottlerFilter::onData(Buffer::Instance& data, bool end_stream) { - read_throttler_->throttle(data, end_stream); - ASSERT(data.length() == 0); - return Network::FilterStatus::StopIteration; -} - -// Network::WriteFilter -Network::FilterStatus ThrottlerFilter::onWrite(Buffer::Instance& data, bool end_stream) { - write_throttler_->throttle(data, end_stream); - ASSERT(data.length() == 0); - return Network::FilterStatus::StopIteration; -} - -// Network::ConnectionCallbacks -void ThrottlerFilter::onEvent(Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - read_throttler_->reset(); - write_throttler_->reset(); - } -} - -/** - * Config factory for ThrottlerFilter. - */ -class ThrottlerFilterConfigFactory : public Extensions::NetworkFilters::Common::FactoryBase< - test::integration::filter_manager::Throttler> { -public: - explicit ThrottlerFilterConfigFactory(const std::string& name) : FactoryBase(name) {} - -private: - Network::FilterFactoryCb createFilterFactoryFromProtoTyped( - const test::integration::filter_manager::Throttler& proto_config, - Server::Configuration::FactoryContext&) override { - return [proto_config](Network::FilterManager& filter_manager) -> void { - filter_manager.addFilter(std::make_shared( - std::chrono::milliseconds(proto_config.tick_interval_ms()), - proto_config.max_chunk_length())); - }; - } -}; - -/** - * Auxiliary network filter that makes use of ReadFilterCallbacks::injectReadDataToFilterChain() - * and WriteFilterCallbacks::injectWriteDataToFilterChain() methods in the context of - * ReadFilter::onData() and WriteFilter::onWrite(). - * - * Calls ReadFilterCallbacks::injectReadDataToFilterChain() / - * WriteFilterCallbacks::injectWriteDataToFilterChain() to pass data to the next filter - * byte-by-byte. 
- */ -class DispenserFilter : public Network::Filter { -public: - // Network::ReadFilter - Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; - Network::FilterStatus onNewConnection() override; - void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override { - read_callbacks_ = &callbacks; - } - - // Network::WriteFilter - Network::FilterStatus onWrite(Buffer::Instance& data, bool end_stream) override; - void initializeWriteFilterCallbacks(Network::WriteFilterCallbacks& callbacks) override { - write_callbacks_ = &callbacks; - } - -private: - // Pass data to the next filter byte-by-byte. - void dispense(Buffer::Instance& data, bool end_stream, - std::function next_chunk_cb); - - Network::ReadFilterCallbacks* read_callbacks_{}; - Network::WriteFilterCallbacks* write_callbacks_{}; -}; - -// Network::ReadFilter -Network::FilterStatus DispenserFilter::onNewConnection() { return Network::FilterStatus::Continue; } - -Network::FilterStatus DispenserFilter::onData(Buffer::Instance& data, bool end_stream) { - dispense(data, end_stream, [this](Buffer::Instance& data, bool end_stream) { - read_callbacks_->injectReadDataToFilterChain(data, end_stream); - }); - ASSERT(data.length() == 0); - return Network::FilterStatus::StopIteration; -} - -// Network::WriteFilter -Network::FilterStatus DispenserFilter::onWrite(Buffer::Instance& data, bool end_stream) { - dispense(data, end_stream, [this](Buffer::Instance& data, bool end_stream) { - write_callbacks_->injectWriteDataToFilterChain(data, end_stream); - }); - ASSERT(data.length() == 0); - return Network::FilterStatus::StopIteration; -} - -// Pass data to the next filter byte-by-byte. 
-void DispenserFilter::dispense(Buffer::Instance& data, bool end_stream, - std::function next_chunk_cb) { - Buffer::OwnedImpl next_chunk{}; - do { - if (0 < data.length()) { - next_chunk.move(data, 1); - } - next_chunk_cb(next_chunk, end_stream && 0 == data.length()); - next_chunk.drain(next_chunk.length()); - } while (0 < data.length()); -} - -/** - * Config factory for DispenserFilter. - */ -class DispenserFilterConfigFactory : public Server::Configuration::NamedNetworkFilterConfigFactory { -public: - explicit DispenserFilterConfigFactory(const std::string& name) : name_(name) {} - - // NamedNetworkFilterConfigFactory - Network::FilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Server::Configuration::FactoryContext&) override { - return [](Network::FilterManager& filter_manager) -> void { - filter_manager.addFilter(std::make_shared()); - }; - } - - ProtobufTypes::MessagePtr createEmptyConfigProto() override { - // Using Struct instead of a custom per-filter empty config proto - // This is only allowed in tests. 
- return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; - } - - std::string name() const override { return name_; } - -private: - const std::string name_; -}; - -} // namespace -} // namespace Envoy diff --git a/test/integration/header_prefix_integration_test.cc b/test/integration/header_prefix_integration_test.cc index 2fa88bc4f551..0400d2f25a3c 100644 --- a/test/integration/header_prefix_integration_test.cc +++ b/test/integration/header_prefix_integration_test.cc @@ -18,7 +18,7 @@ static const char* custom_prefix_ = "x-custom"; class HeaderPrefixIntegrationTest : public HttpProtocolIntegrationTest { public: - static void SetUpTestSuite() { + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) ThreadSafeSingleton::get().setPrefix(custom_prefix_); } }; diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 4a1340755b24..2ca27de3d0c3 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -28,12 +28,41 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, Http2IntegrationTest, TestUtility::ipTestParamsToString); TEST_P(Http2IntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) { - testRouterRequestAndResponseWithBody(1024, 512, false); + testRouterRequestAndResponseWithBody(1024, 512, false, false); +} + +TEST_P(Http2IntegrationTest, RouterRequestAndResponseWithGiantBodyNoBuffer) { + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false); } TEST_P(Http2IntegrationTest, FlowControlOnAndGiantBody) { config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. 
- testRouterRequestAndResponseWithBody(1024 * 1024, 1024 * 1024, false); + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false); +} + +TEST_P(Http2IntegrationTest, LargeFlowControlOnAndGiantBody) { + config_helper_.setBufferLimits(128 * 1024, + 128 * 1024); // Set buffer limits upstream and downstream. + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false); +} + +TEST_P(Http2IntegrationTest, RouterRequestAndResponseWithBodyAndContentLengthNoBuffer) { + testRouterRequestAndResponseWithBody(1024, 512, false, true); +} + +TEST_P(Http2IntegrationTest, RouterRequestAndResponseWithGiantBodyAndContentLengthNoBuffer) { + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true); +} + +TEST_P(Http2IntegrationTest, FlowControlOnAndGiantBodyWithContentLength) { + config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true); +} + +TEST_P(Http2IntegrationTest, LargeFlowControlOnAndGiantBodyWithContentLength) { + config_helper_.setBufferLimits(128 * 1024, + 128 * 1024); // Set buffer limits upstream and downstream. + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true); } TEST_P(Http2IntegrationTest, RouterHeaderOnlyRequestAndResponseNoBuffer) { @@ -825,7 +854,7 @@ TEST_P(Http2IntegrationTest, BadFrame) { // Send client headers, a GoAway and then a body and ensure the full request and // response are received. 
TEST_P(Http2IntegrationTest, GoAway) { - config_helper_.addFilter(ConfigHelper::DEFAULT_HEALTH_CHECK_FILTER); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1622,6 +1651,10 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { auto response = readFrame(); // Make sure we've got RST_STREAM from the server EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); + + // Disable reading to make sure that the RST_STREAM frames stack up on the server. + tcp_client_->readDisable(true); + uint64_t total_bytes_sent = 0; while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { request = Http::Http2::Http2Frame::makeMalformedRequest(++i); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index db556536f441..f485f3083c64 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -256,19 +256,6 @@ HttpIntegrationTest::~HttpIntegrationTest() { fake_upstreams_.clear(); } -std::string HttpIntegrationTest::waitForAccessLog(const std::string& filename) { - // Wait a max of 1s for logs to flush to disk. 
- for (int i = 0; i < 1000; ++i) { - std::string contents = TestEnvironment::readFileToStringForTest(filename, false); - if (contents.length() > 0) { - return contents; - } - absl::SleepFor(absl::Milliseconds(1)); - } - RELEASE_ASSERT(0, "Timed out waiting for access log"); - return ""; -} - void HttpIntegrationTest::setDownstreamProtocol(Http::CodecClient::Type downstream_protocol) { downstream_protocol_ = downstream_protocol; config_helper_.setClientCodec(typeToCodecType(downstream_protocol_)); @@ -411,7 +398,7 @@ void HttpIntegrationTest::checkSimpleRequestSuccess(uint64_t expected_request_si } void HttpIntegrationTest::testRouterRequestAndResponseWithBody( - uint64_t request_size, uint64_t response_size, bool big_header, + uint64_t request_size, uint64_t response_size, bool big_header, bool set_content_length_header, ConnectionCreationFunction* create_connection) { initialize(); codec_client_ = makeHttpConnection( @@ -419,11 +406,16 @@ void HttpIntegrationTest::testRouterRequestAndResponseWithBody( Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + if (set_content_length_header) { + request_headers.setContentLength(request_size); + response_headers.setContentLength(response_size); + } if (big_header) { request_headers.addCopy("big", std::string(4096, 'a')); } - auto response = sendRequestAndWaitForResponse(request_headers, request_size, - default_response_headers_, response_size); + auto response = + sendRequestAndWaitForResponse(request_headers, request_size, response_headers, response_size); checkSimpleRequestSuccess(request_size, response_size, response.get()); } @@ -476,6 +468,51 @@ void HttpIntegrationTest::testRouterNotFoundWithBody() { EXPECT_EQ("404", response->headers().Status()->value().getStringView()); } +// Make sure virtual 
cluster stats are charged to the appropriate virtual cluster. +void HttpIntegrationTest::testRouterVirtualClusters() { + const std::string matching_header = "x-use-test-vcluster"; + config_helper_.addConfigModifier( + [matching_header]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* route_config = hcm.mutable_route_config(); + ASSERT_EQ(1, route_config->virtual_hosts_size()); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + { + auto* virtual_cluster = virtual_host->add_virtual_clusters(); + virtual_cluster->set_name("test_vcluster"); + auto* headers = virtual_cluster->add_headers(); + headers->set_name(matching_header); + headers->set_present_match(true); + } + }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {matching_header, "true"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + checkSimpleRequestSuccess(0, 0, response.get()); + + test_server_->waitForCounterEq("vhost.integration.vcluster.test_vcluster.upstream_rq_total", 1); + test_server_->waitForCounterEq("vhost.integration.vcluster.other.upstream_rq_total", 0); + + Http::TestRequestHeaderMapImpl request_headers2{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; + + auto response2 = sendRequestAndWaitForResponse(request_headers2, 0, default_response_headers_, 0); + checkSimpleRequestSuccess(0, 0, response2.get()); + + test_server_->waitForCounterEq("vhost.integration.vcluster.test_vcluster.upstream_rq_total", 1); + test_server_->waitForCounterEq("vhost.integration.vcluster.other.upstream_rq_total", 1); +} + void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { initialize(); codec_client_ = 
makeHttpConnection(lookupPort("http")); @@ -1130,6 +1167,48 @@ void HttpIntegrationTest::testTrailers(uint64_t request_size, uint64_t response_ } } +void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_type) { + initialize(); + + uint32_t http_port = lookupPort("http"); + codec_client_ = makeHttpConnection(http_port); + Http::TestRequestHeaderMapImpl request_headers{{":method", "HEAD"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; + IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(0); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + upstream_request_->encodeHeaders(default_response_headers_, false); + + // Invoke drain listeners endpoint and validate that we can still work on inflight requests. + BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners", "", admin_request_type, version_); + EXPECT_TRUE(admin_response->complete()); + EXPECT_EQ("200", admin_response->headers().Status()->value().getStringView()); + EXPECT_EQ("OK\n", admin_response->body()); + + upstream_request_->encodeData(512, true); + + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + + // Wait for the response to be read by the codec client. + response->waitForEndStream(); + + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + + // Validate that the listeners have been stopped. + test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); + + // Validate that port is closed and can be bound by other sockets. 
+ EXPECT_NO_THROW(Network::TcpListenSocket( + Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), + http_port), + nullptr, true)); +} + std::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name) { if (version_ == Network::Address::IpVersion::v4) { return "listener.127.0.0.1_0." + stat_name; diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 3fbace26ffe7..99f2c8850521 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -87,22 +87,19 @@ class HttpIntegrationTest : public BaseIntegrationTest { // https://github.com/envoyproxy/envoy-filter-example/pull/69 is reverted. HttpIntegrationTest(Http::CodecClient::Type downstream_protocol, Network::Address::IpVersion version, TestTimeSystemPtr, - const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG) + const std::string& config = ConfigHelper::httpProxyConfig()) : HttpIntegrationTest(downstream_protocol, version, config) {} HttpIntegrationTest(Http::CodecClient::Type downstream_protocol, Network::Address::IpVersion version, - const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG); + const std::string& config = ConfigHelper::httpProxyConfig()); HttpIntegrationTest(Http::CodecClient::Type downstream_protocol, const InstanceConstSharedPtrFn& upstream_address_fn, Network::Address::IpVersion version, - const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG); + const std::string& config = ConfigHelper::httpProxyConfig()); ~HttpIntegrationTest() override; - // Waits for the first access log entry. 
- std::string waitForAccessLog(const std::string& filename); - protected: void useAccessLog(absl::string_view format = ""); @@ -173,9 +170,10 @@ class HttpIntegrationTest : public BaseIntegrationTest { const std::string& authority = "host"); void testRouterNotFound(); void testRouterNotFoundWithBody(); + void testRouterVirtualClusters(); void testRouterRequestAndResponseWithBody(uint64_t request_size, uint64_t response_size, - bool big_header, + bool big_header, bool set_content_length_header = false, ConnectionCreationFunction* creator = nullptr); void testRouterHeaderOnlyRequestAndResponse(ConnectionCreationFunction* creator = nullptr, int upstream_index = 0, @@ -220,6 +218,8 @@ class HttpIntegrationTest : public BaseIntegrationTest { // makes sure they were dropped. void testTrailers(uint64_t request_size, uint64_t response_size, bool request_trailers_present, bool response_trailers_present); + // Test /drain_listener from admin portal. + void testAdminDrain(Http::CodecClient::Type admin_request_type); Http::CodecClient::Type downstreamProtocol() const { return downstream_protocol_; } // Prefix listener stat with IP:port, including IP version dependent loopback address. 
diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index 58be89b76d72..4137ec95bbed 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -51,7 +51,7 @@ class HttpSubsetLbIntegrationTest HttpSubsetLbIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, TestEnvironment::getIpVersionsForTest().front(), - ConfigHelper::HTTP_PROXY_CONFIG), + ConfigHelper::httpProxyConfig()), num_hosts_{4}, is_hash_lb_(GetParam() == envoy::config::cluster::v3::Cluster::RING_HASH || GetParam() == envoy::config::cluster::v3::Cluster::MAGLEV) { autonomous_upstream_ = true; diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index d4827b0d7148..299d875247be 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -282,7 +282,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) { // Successful request/response when per-stream idle timeout is configured. 
TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutRequestAndResponse) { enable_per_stream_idle_timeout_ = true; - testRouterRequestAndResponseWithBody(1024, 1024, false, nullptr); + testRouterRequestAndResponseWithBody(1024, 1024, false); } TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutConfiguredRequestResponse) { @@ -292,7 +292,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutConfiguredRequestResponse) { TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutConfiguredRequestResponseWithBody) { enable_request_timeout_ = true; - testRouterRequestAndResponseWithBody(1024, 1024, false, nullptr); + testRouterRequestAndResponseWithBody(1024, 1024, false); } TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutTriggersOnBodilessPost) { diff --git a/test/integration/integration.cc b/test/integration/integration.cc index ad466fe7d3ec..3c862a81c0c0 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -281,10 +281,15 @@ BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, version, config) {} Network::ClientConnectionPtr BaseIntegrationTest::makeClientConnection(uint32_t port) { + return makeClientConnectionWithOptions(port, nullptr); +} + +Network::ClientConnectionPtr BaseIntegrationTest::makeClientConnectionWithOptions( + uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) { Network::ClientConnectionPtr connection(dispatcher_->createClientConnection( Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)), - Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr)); + Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), options)); connection->enableHalfClose(enable_half_close_); return connection; @@ -409,9 +414,20 @@ void BaseIntegrationTest::setUpstreamAddress( } void BaseIntegrationTest::registerTestServerPorts(const std::vector& port_names) { - auto port_it = 
port_names.cbegin(); - auto listeners = test_server_->server().listenerManager().listeners(); + bool listeners_ready = false; + absl::Mutex l; + std::vector> listeners; + test_server_->server().dispatcher().post([this, &listeners, &listeners_ready, &l]() { + listeners = test_server_->server().listenerManager().listeners(); + l.Lock(); + listeners_ready = true; + l.Unlock(); + }); + l.LockWhen(absl::Condition(&listeners_ready)); + l.Unlock(); + auto listener_it = listeners.cbegin(); + auto port_it = port_names.cbegin(); for (; port_it != port_names.end() && listener_it != listeners.end(); ++port_it, ++listener_it) { const auto listen_addr = listener_it->get().listenSocketFactory().localAddress(); if (listen_addr->type() == Network::Address::Type::Ip) { @@ -523,6 +539,24 @@ IntegrationTestServerPtr BaseIntegrationTest::createIntegrationTestServer( defer_listener_finalization_); } +void BaseIntegrationTest::useListenerAccessLog(absl::string_view format) { + listener_access_log_name_ = TestEnvironment::temporaryPath(TestUtility::uniqueFilename()); + ASSERT_TRUE(config_helper_.setListenerAccessLog(listener_access_log_name_, format)); +} + +std::string BaseIntegrationTest::waitForAccessLog(const std::string& filename) { + // Wait a max of 1s for logs to flush to disk. 
+ for (int i = 0; i < 1000; ++i) { + std::string contents = TestEnvironment::readFileToStringForTest(filename, false); + if (contents.length() > 0) { + return contents; + } + absl::SleepFor(absl::Milliseconds(1)); + } + RELEASE_ASSERT(0, "Timed out waiting for access log"); + return ""; +} + void BaseIntegrationTest::createXdsUpstream() { if (create_xds_upstream_ == false) { return; diff --git a/test/integration/integration.h b/test/integration/integration.h index cb1133eabb2e..791df09079fb 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -152,15 +152,15 @@ class BaseIntegrationTest : protected Logger::Loggable { // Creates a test fixture with an upstream bound to INADDR_ANY on an unspecified port using the // provided IP |version|. BaseIntegrationTest(Network::Address::IpVersion version, - const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG); + const std::string& config = ConfigHelper::httpProxyConfig()); BaseIntegrationTest(Network::Address::IpVersion version, TestTimeSystemPtr, - const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG) + const std::string& config = ConfigHelper::httpProxyConfig()) : BaseIntegrationTest(version, config) {} // Creates a test fixture with a specified |upstream_address| function that provides the IP and // port to use. 
BaseIntegrationTest(const InstanceConstSharedPtrFn& upstream_address_fn, Network::Address::IpVersion version, - const std::string& config = ConfigHelper::HTTP_PROXY_CONFIG); + const std::string& config = ConfigHelper::httpProxyConfig()); virtual ~BaseIntegrationTest() = default; @@ -197,7 +197,10 @@ class BaseIntegrationTest : protected Logger::Loggable { void setUpstreamAddress(uint32_t upstream_index, envoy::config::endpoint::v3::LbEndpoint& endpoint) const; - virtual Network::ClientConnectionPtr makeClientConnection(uint32_t port); + Network::ClientConnectionPtr makeClientConnection(uint32_t port); + virtual Network::ClientConnectionPtr + makeClientConnectionWithOptions(uint32_t port, + const Network::ConnectionSocket::OptionsSharedPtr& options); void registerTestServerPorts(const std::vector& port_names); void createTestServer(const std::string& json_path, const std::vector& port_names); @@ -217,6 +220,13 @@ class BaseIntegrationTest : protected Logger::Loggable { Api::ApiPtr api_for_server_stat_store_; MockBufferFactory* mock_buffer_factory_; // Will point to the dispatcher's factory. + // Enable the listener access log + void useListenerAccessLog(absl::string_view format = ""); + // Waits for the first access log entry. 
+ std::string waitForAccessLog(const std::string& filename); + + std::string listener_access_log_name_; + // Functions for testing reloadable config (xDS) void createXdsUpstream(); void createXdsConnection(); @@ -318,7 +328,7 @@ class BaseIntegrationTest : protected Logger::Loggable { resource->set_name(TestUtility::xdsResourceName(temp_any)); resource->set_version(version); resource->mutable_resource()->PackFrom(API_DOWNGRADE(message)); - for (const auto alias : aliases) { + for (const auto& alias : aliases) { resource->add_aliases(alias); } } diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index d0076c27ef0d..a5ecdcf30b56 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -68,7 +68,7 @@ TEST_P(IntegrationAdminTest, HealthCheckWithoutServerStats) { } TEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) { - config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); initialize(); BufferingStreamDecoderPtr response; @@ -267,14 +267,17 @@ TEST_P(IntegrationAdminTest, Admin) { switch (GetParam().downstream_protocol) { case Http::CodecClient::Type::HTTP1: EXPECT_EQ(" Count Lookup\n" + " 1 http1.dropped_headers_with_underscores\n" " 1 http1.metadata_not_supported_error\n" + " 1 http1.requests_rejected_with_underscores_in_headers\n" " 1 http1.response_flood\n" "\n" - "total: 2\n", + "total: 4\n", response->body()); break; case Http::CodecClient::Type::HTTP2: EXPECT_EQ(" Count Lookup\n" + " 1 http2.dropped_headers_with_underscores\n" " 1 http2.header_overflow\n" " 1 http2.headers_cb_no_stream\n" " 1 http2.inbound_empty_frames_flood\n" @@ -282,13 +285,14 @@ TEST_P(IntegrationAdminTest, Admin) { " 1 http2.inbound_window_update_frames_flood\n" " 1 http2.outbound_control_flood\n" " 1 http2.outbound_flood\n" + " 1 http2.requests_rejected_with_underscores_in_headers\n" " 1 
http2.rx_messaging_error\n" " 1 http2.rx_reset\n" " 1 http2.too_many_header_frames\n" " 1 http2.trailers\n" " 1 http2.tx_reset\n" "\n" - "total: 12\n", + "total: 14\n", response->body()); break; case Http::CodecClient::Type::HTTP3: diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index 6cc8c10a2018..97426d52a651 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -15,7 +15,7 @@ namespace Envoy { class IntegrationAdminTest : public HttpProtocolIntegrationTest { public: void initialize() override { - config_helper_.addFilter(ConfigHelper::DEFAULT_HEALTH_CHECK_FILTER); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); HttpIntegrationTest::initialize(); } diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 63a2e4f33f01..c18599476f5f 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -8,6 +8,7 @@ #include "common/http/header_map_impl.h" #include "common/http/headers.h" +#include "common/network/socket_option_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" @@ -96,47 +97,7 @@ TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { } // Validates that the drain actually drains the listeners. 
-TEST_P(IntegrationTest, AdminDrainDrainsListeners) { - initialize(); - - uint32_t http_port = lookupPort("http"); - codec_client_ = makeHttpConnection(http_port); - Http::TestRequestHeaderMapImpl request_headers{{":method", "HEAD"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}; - IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(request_headers); - waitForNextUpstreamRequest(0); - fake_upstreams_[0]->set_allow_unexpected_disconnects(true); - - upstream_request_->encodeHeaders(default_response_headers_, false); - - // Invoke drain listeners endpoint and validate that we can still work on inflight requests. - BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( - lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); - EXPECT_TRUE(admin_response->complete()); - EXPECT_EQ("200", admin_response->headers().Status()->value().getStringView()); - EXPECT_EQ("OK\n", admin_response->body()); - - upstream_request_->encodeData(512, true); - - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - - // Wait for the response to be read by the codec client. - response->waitForEndStream(); - - ASSERT_TRUE(response->complete()); - EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); - - // Validate that the listeners have been stopped. - test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); - - // Validate that port is closed and can be bound by other sockets. 
- EXPECT_NO_THROW(Network::TcpListenSocket( - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(GetParam()), - http_port), - nullptr, true)); -} +TEST_P(IntegrationTest, AdminDrainDrainsListeners) { testAdminDrain(downstreamProtocol()); } TEST_P(IntegrationTest, RouterDirectResponse) { const std::string body = "Response body"; @@ -180,7 +141,7 @@ TEST_P(IntegrationTest, RouterDirectResponse) { } TEST_P(IntegrationTest, ConnectionClose) { - config_helper_.addFilter(ConfigHelper::DEFAULT_HEALTH_CHECK_FILTER); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -197,12 +158,39 @@ TEST_P(IntegrationTest, ConnectionClose) { } TEST_P(IntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) { - testRouterRequestAndResponseWithBody(1024, 512, false); + testRouterRequestAndResponseWithBody(1024, 512, false, false); +} + +TEST_P(IntegrationTest, RouterRequestAndResponseWithGiantBodyNoBuffer) { + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false); } TEST_P(IntegrationTest, FlowControlOnAndGiantBody) { config_helper_.setBufferLimits(1024, 1024); - testRouterRequestAndResponseWithBody(1024 * 1024, 1024 * 1024, false); + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false); +} + +TEST_P(IntegrationTest, LargeFlowControlOnAndGiantBody) { + config_helper_.setBufferLimits(128 * 1024, 128 * 1024); + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, false); +} + +TEST_P(IntegrationTest, RouterRequestAndResponseWithBodyAndContentLengthNoBuffer) { + testRouterRequestAndResponseWithBody(1024, 512, false, true); +} + +TEST_P(IntegrationTest, RouterRequestAndResponseWithGiantBodyAndContentLengthNoBuffer) { + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true); +} + +TEST_P(IntegrationTest, FlowControlOnAndGiantBodyWithContentLength) { 
+ config_helper_.setBufferLimits(1024, 1024); + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true); +} + +TEST_P(IntegrationTest, LargeFlowControlOnAndGiantBodyWithContentLength) { + config_helper_.setBufferLimits(128 * 1024, 128 * 1024); + testRouterRequestAndResponseWithBody(10 * 1024 * 1024, 10 * 1024 * 1024, false, true); } TEST_P(IntegrationTest, RouterRequestAndResponseLargeHeaderNoBuffer) { @@ -236,10 +224,8 @@ TEST_P(IntegrationTest, ResponseFramedByConnectionCloseWithReadLimits) { auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); // Disable chunk encoding to trigger framing by connection close. - // TODO: This request should be propagated to codecs via API, instead of using a pseudo-header. - // See: https://github.com/envoyproxy/envoy/issues/9749 - upstream_request_->encodeHeaders( - Http::TestResponseHeaderMapImpl{{":status", "200"}, {":no-chunks", "1"}}, false); + upstream_request_->http1StreamEncoderOptions().value().get().disableChunkEncoding(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); upstream_request_->encodeData(512, true); ASSERT_TRUE(fake_upstream_connection_->close()); @@ -384,14 +370,13 @@ TEST_P(IntegrationTest, TestSmuggling) { EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); } - // Make sure unsupported transfer encodings are rejected, lest they be abused. 
{ std::string response; const std::string request = "GET / HTTP/1.1\r\nHost: host\r\ntransfer-encoding: " "identity,chunked \r\ncontent-length: 36\r\n\r\n" + smuggled_request; sendRawHttpAndWaitForResponse(lookupPort("http"), request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 501 Not Implemented\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", + EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); } } @@ -1209,6 +1194,49 @@ TEST_P(IntegrationTest, TestFlood) { EXPECT_EQ(1, test_server_->counter("http1.response_flood")->value()); } +TEST_P(IntegrationTest, TestFloodUpstreamErrors) { + autonomous_upstream_ = true; + initialize(); + + // Set an Upstream reply with an invalid content-length, which will be rejected by the Envoy. + auto response_headers = std::make_unique( + Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "invalid"}})); + reinterpret_cast(fake_upstreams_.front().get()) + ->setResponseHeaders(std::move(response_headers)); + + // Set up a raw connection to easily send requests without reading responses. Also, set a small + // TCP receive buffer to speed up connection backup while proxying the response flood. + auto options = std::make_shared(); + options->emplace_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + Network::ClientConnectionPtr raw_connection = + makeClientConnectionWithOptions(lookupPort("http"), options); + raw_connection->connect(); + + // Read disable so responses will queue up. + uint32_t bytes_to_send = 0; + raw_connection->readDisable(true); + // Track locally queued bytes, to make sure the outbound client queue doesn't back up. + raw_connection->addBytesSentCallback([&](uint64_t bytes) { bytes_to_send -= bytes; }); + + // Keep sending requests until flood protection kicks in and kills the connection. 
+ while (raw_connection->state() == Network::Connection::State::Open) { + // The upstream response is invalid, and will trigger an internally generated error response + // from Envoy. + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nhost: foo.com\r\n\r\n"); + bytes_to_send += buffer.length(); + raw_connection->write(buffer, false); + // Loop until all bytes are sent. + while (bytes_to_send > 0 && raw_connection->state() == Network::Connection::State::Open) { + raw_connection->dispatcher().run(Event::Dispatcher::RunType::NonBlock); + } + } + + // Verify the connection was closed due to flood protection. + EXPECT_EQ(1, test_server_->counter("http1.response_flood")->value()); +} + // Make sure flood protection doesn't kick in with many requests sent serially. TEST_P(IntegrationTest, TestManyBadRequests) { initialize(); @@ -1217,7 +1245,7 @@ TEST_P(IntegrationTest, TestManyBadRequests) { Http::TestRequestHeaderMapImpl bad_request{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}}; - for (int i = 0; i < 1; ++i) { + for (int i = 0; i < 1000; ++i) { IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(bad_request); response->waitForEndStream(); ASSERT_TRUE(response->complete()); diff --git a/test/integration/listener_filter_integration_test.cc b/test/integration/listener_filter_integration_test.cc new file mode 100644 index 000000000000..425cf0edaae8 --- /dev/null +++ b/test/integration/listener_filter_integration_test.cc @@ -0,0 +1,123 @@ +#include + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/extensions/access_loggers/file/v3/file.pb.h" + +#include "common/config/api_version.h" +#include "common/network/utility.h" + +#include "extensions/filters/listener/tls_inspector/tls_inspector.h" +#include "extensions/transport_sockets/tls/context_manager_impl.h" + +#include "test/integration/integration.h" +#include 
"test/integration/ssl_utility.h" +#include "test/integration/utility.h" +#include "test/mocks/secret/mocks.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class ListenerFilterIntegrationTest : public testing::TestWithParam, + public BaseIntegrationTest { +public: + ListenerFilterIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::baseConfig() + R"EOF( + filter_chains: + filters: + - name: envoy.filters.network.echo +)EOF") {} + + ~ListenerFilterIntegrationTest() override = default; + std::string appendMatcher(const std::string& listener_filter, bool disabled) { + if (disabled) { + return listener_filter + + R"EOF( +filter_disabled: + any_match: true +)EOF"; + } else { + return listener_filter + + R"EOF( +filter_disabled: + not_match: + any_match: true +)EOF"; + } + } + + void initializeWithListenerFilter(absl::optional listener_filter_disabled = absl::nullopt) { + config_helper_.renameListener("echo"); + std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); + if (listener_filter_disabled.has_value()) { + tls_inspector_config = appendMatcher(tls_inspector_config, listener_filter_disabled.value()); + } + config_helper_.addListenerFilter(tls_inspector_config); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + auto* alpn = filter_chain->mutable_filter_chain_match()->add_application_protocols(); + *alpn = "envoyalpn"; + }); + config_helper_.addSslConfig(); + BaseIntegrationTest::initialize(); + + context_manager_ = + std::make_unique(timeSystem()); + } + + void setupConnections(bool listener_filter_disabled, bool expect_connection_open) { + initializeWithListenerFilter(listener_filter_disabled); + + // Set up the SSL client. 
+ Network::Address::InstanceConstSharedPtr address = + Ssl::getSslAddress(version_, lookupPort("echo")); + context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); + ssl_client_ = dispatcher_->createClientConnection( + address, Network::Address::InstanceConstSharedPtr(), + context_->createTransportSocket( + // nullptr + std::make_shared( + absl::string_view(""), std::vector(), + std::vector{"envoyalpn"})), + nullptr); + ssl_client_->addConnectionCallbacks(connect_callbacks_); + ssl_client_->connect(); + while (!connect_callbacks_.connected() && !connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + if (expect_connection_open) { + ASSERT(connect_callbacks_.connected()); + ASSERT_FALSE(connect_callbacks_.closed()); + } else { + ASSERT_FALSE(connect_callbacks_.connected()); + ASSERT(connect_callbacks_.closed()); + } + } + std::unique_ptr context_manager_; + Network::TransportSocketFactoryPtr context_; + ConnectionStatusCallbacks connect_callbacks_; + testing::NiceMock secret_manager_; + Network::ClientConnectionPtr ssl_client_; +}; + +// Each listener filter is enabled by default. +TEST_P(ListenerFilterIntegrationTest, AllListenerFiltersAreEnabledByDefault) { + setupConnections(/*listener_filter_disabled=*/false, /*expect_connection_open=*/true); + ssl_client_->close(Network::ConnectionCloseType::NoFlush); +} + +// The tls_inspector is disabled. The ALPN won't be sniffed out and no filter chain is matched. 
+TEST_P(ListenerFilterIntegrationTest, DisabledTlsInspectorFailsFilterChainFind) { + setupConnections(/*listener_filter_disabled=*/true, /*expect_connection_open=*/false); +} + +INSTANTIATE_TEST_SUITE_P(IpVersions, ListenerFilterIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); +} // namespace +} // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index d7829f51a642..d051884e2629 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -39,6 +39,7 @@ #include "gtest/gtest.h" using testing::HasSubstr; +using testing::Not; namespace Envoy { @@ -104,6 +105,8 @@ TEST_P(ProtocolIntegrationTest, ShutdownWithActiveConnPoolConnections) { // Change the default route to be restrictive, and send a request to an alternate route. TEST_P(ProtocolIntegrationTest, RouterNotFound) { testRouterNotFound(); } +TEST_P(ProtocolIntegrationTest, RouterVirtualClusters) { testRouterVirtualClusters(); } + // Change the default route to be restrictive, and send a POST to an alternate route. TEST_P(DownstreamProtocolIntegrationTest, RouterNotFoundBodyNoBuffer) { testRouterNotFoundWithBody(); @@ -226,7 +229,7 @@ name: add-trailers-filter // Add a health check filter and verify correct behavior when draining. TEST_P(ProtocolIntegrationTest, DrainClose) { - config_helper_.addFilter(ConfigHelper::DEFAULT_HEALTH_CHECK_FILTER); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); test_server_->drainManager().draining_ = true; @@ -632,6 +635,85 @@ TEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); } TEST_P(ProtocolIntegrationTest, TwoRequestsWithForcedBackup) { testTwoRequests(true); } +// Verify that headers with underscores in their names are dropped from client requests +// but remain in upstream responses. 
+TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_common_http_protocol_options()->set_headers_with_underscores_action( + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"foo_bar", "baz"}}); + waitForNextUpstreamRequest(); + + EXPECT_THAT(upstream_request_->headers(), Not(HeaderHasValueRef("foo_bar", "baz"))); + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, true); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); +} + +// Verify that by default headers with underscores in their names remain in both requests and +// responses when allowed in configuration. 
+TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresRemainByDefault) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"foo_bar", "baz"}}); + waitForNextUpstreamRequest(); + + EXPECT_THAT(upstream_request_->headers(), HeaderHasValueRef("foo_bar", "baz")); + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, true); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); +} + +// Verify that request with headers containing underscores is rejected when configured. +TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefault) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_common_http_protocol_options()->set_headers_with_underscores_action( + envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"foo_bar", "baz"}}); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + } else { + response->waitForReset(); + codec_client_->close(); + ASSERT_TRUE(response->reset()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + } +} + 
TEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) { initialize(); diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index c10323e75737..ec62a8991a6a 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -26,7 +26,7 @@ TEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBuffer) { return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } TEST_P(ProxyProtoIntegrationTest, V2RouterRequestAndResponseWithBodyNoBuffer) { @@ -40,7 +40,7 @@ TEST_P(ProxyProtoIntegrationTest, V2RouterRequestAndResponseWithBodyNoBuffer) { return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } TEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBufferV6) { @@ -51,7 +51,7 @@ TEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBufferV6) return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } TEST_P(ProxyProtoIntegrationTest, V2RouterRequestAndResponseWithBodyNoBufferV6) { @@ -67,7 +67,7 @@ TEST_P(ProxyProtoIntegrationTest, V2RouterRequestAndResponseWithBodyNoBufferV6) return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } TEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownRequestAndResponseWithBodyNoBuffer) { @@ -78,7 +78,7 @@ TEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownRequestAndResponseWithBodyNo return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } 
TEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownLongRequestAndResponseWithBodyNoBuffer) { @@ -89,7 +89,7 @@ TEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownLongRequestAndResponseWithBo return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } // Test that %DOWNSTREAM_DIRECT_REMOTE_ADDRESS%/%DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT% @@ -110,7 +110,7 @@ TEST_P(ProxyProtoIntegrationTest, AccessLog) { return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); const std::string log_line = waitForAccessLog(access_log_name_); const std::vector tokens = StringUtil::splitToken(log_line, " "); @@ -147,7 +147,7 @@ TEST_P(ProxyProtoIntegrationTest, DEPRECATED_FEATURE_TEST(OriginalDst)) { return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } TEST_P(ProxyProtoIntegrationTest, ClusterProvided) { @@ -177,7 +177,7 @@ TEST_P(ProxyProtoIntegrationTest, ClusterProvided) { return conn; }; - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } } // namespace Envoy diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index 8b488b0f58e1..f0a92b8e19c7 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -104,7 +104,7 @@ TEST_P(SdsStaticDownstreamIntegrationTest, RouterRequestAndResponseWithGiantBody ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection(); }; - testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, &creator); + testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 
1024 * 1024, false, false, &creator); } class SdsStaticUpstreamIntegrationTest : public testing::TestWithParam, @@ -160,7 +160,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, SdsStaticUpstreamIntegrationTest, TestUtility::ipTestParamsToString); TEST_P(SdsStaticUpstreamIntegrationTest, RouterRequestAndResponseWithGiantBodyBuffer) { - testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, nullptr); + testRouterRequestAndResponseWithBody(16 * 1024 * 1024, 16 * 1024 * 1024, false, false, nullptr); } } // namespace Ssl diff --git a/test/integration/server.h b/test/integration/server.h index 7741e8cdc214..13dfbe451147 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -102,15 +102,15 @@ class TestScopeWrapper : public Scope { return wrapped_scope_->nullGauge(str); } - Counter& counter(const std::string& name) override { + Counter& counterFromString(const std::string& name) override { StatNameManagedStorage storage(name, symbolTable()); return counterFromStatName(storage.statName()); } - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override { + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { StatNameManagedStorage storage(name, symbolTable()); return gaugeFromStatName(storage.statName(), import_mode); } - Histogram& histogram(const std::string& name, Histogram::Unit unit) override { + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override { StatNameManagedStorage storage(name, symbolTable()); return histogramFromStatName(storage.statName(), unit); } @@ -150,9 +150,9 @@ class TestIsolatedStoreImpl : public StoreRoot { Thread::LockGuard lock(lock_); return store_.counterFromStatNameWithTags(name, tags); } - Counter& counter(const std::string& name) override { + Counter& counterFromString(const std::string& name) override { Thread::LockGuard lock(lock_); - return store_.counter(name); + return store_.counterFromString(name); } ScopePtr 
createScope(const std::string& name) override { Thread::LockGuard lock(lock_); @@ -164,9 +164,9 @@ class TestIsolatedStoreImpl : public StoreRoot { Thread::LockGuard lock(lock_); return store_.gaugeFromStatNameWithTags(name, tags, import_mode); } - Gauge& gauge(const std::string& name, Gauge::ImportMode import_mode) override { + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { Thread::LockGuard lock(lock_); - return store_.gauge(name, import_mode); + return store_.gaugeFromString(name, import_mode); } Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags, Histogram::Unit unit) override { @@ -174,9 +174,9 @@ class TestIsolatedStoreImpl : public StoreRoot { return store_.histogramFromStatNameWithTags(name, tags, unit); } NullGaugeImpl& nullGauge(const std::string& name) override { return store_.nullGauge(name); } - Histogram& histogram(const std::string& name, Histogram::Unit unit) override { + Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override { Thread::LockGuard lock(lock_); - return store_.histogram(name, unit); + return store_.histogramFromString(name, unit); } CounterOptConstRef findCounter(StatName name) const override { Thread::LockGuard lock(lock_); diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index ce12a2bde172..c08092a4b47d 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -219,8 +219,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with // differing configuration. This is necessary for measuring the memory consumption // between the different instances within the same test. 
- const size_t m1000 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 1001, 0, true); - const size_t m_per_cluster = (m1000) / 1000; + const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 101, 0, true); + const size_t m_per_cluster = (m100) / 100; // Note: if you are increasing this golden value because you are adding a // stat, please confirm that this will be generally useful to most Envoy @@ -268,6 +268,9 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/02/13 10042 43797 44136 Metadata: Metadata are shared across different // clusters and hosts. // 2020/03/16 9964 44085 44600 http2: support custom SETTINGS parameters. + // 2020/03/24 10501 44261 44600 upstream: upstream_rq_retry_limit_exceeded. + // 2020/04/02 10624 43356 44000 Use 100 clusters rather than 1000 to avoid timeouts + // 2020/04/07 10661 43349 44000 fix clang tidy on master // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -281,8 +284,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 44085); - EXPECT_MEMORY_LE(m_per_cluster, 44600); + EXPECT_MEMORY_EQ(m_per_cluster, 43349); + EXPECT_MEMORY_LE(m_per_cluster, 44000); } TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { @@ -291,8 +294,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with // differing configuration. This is necessary for measuring the memory consumption // between the different instances within the same test. 
- const size_t m1000 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 1001, 0, true); - const size_t m_per_cluster = (m1000) / 1000; + const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 0, 101, 0, true); + const size_t m_per_cluster = (m100) / 100; // Note: if you are increasing this golden value because you are adding a // stat, please confirm that this will be generally useful to most Envoy @@ -323,6 +326,9 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/01/12 9633 35932 36500 config: support recovery of original message when // upgrading. // 2020/03/16 9964 36220 36800 http2: support custom SETTINGS parameters. + // 2020/03/24 10501 36300 36800 upstream: upstream_rq_retry_limit_exceeded. + // 2020/04/02 10624 35564 36000 Use 100 clusters rather than 1000 to avoid timeouts + // 2020/04/07 10661 35557 36000 fix clang tidy on master // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -336,8 +342,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 36220); - EXPECT_MEMORY_LE(m_per_cluster, 36800); + EXPECT_MEMORY_EQ(m_per_cluster, 35557); + EXPECT_MEMORY_LE(m_per_cluster, 36000); } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { @@ -346,8 +352,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // A unique instance of ClusterMemoryTest allows for multiple runs of Envoy with // differing configuration. This is necessary for measuring the memory consumption // between the different instances within the same test. 
- const size_t m1000 = ClusterMemoryTestHelper::computeMemoryDelta(1, 1, 1, 1001, true); - const size_t m_per_host = (m1000) / 1000; + const size_t m100 = ClusterMemoryTestHelper::computeMemoryDelta(1, 1, 1, 101, true); + const size_t m_per_host = (m100) / 100; // Note: if you are increasing this golden value because you are adding a // stat, please confirm that this will be generally useful to most Envoy @@ -368,6 +374,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // 2020/01/13 9663 1619 1655 api: deprecate hosts in Cluster. // 2020/02/13 10042 1363 1655 Metadata object are shared across different clusters // and hosts. + // 2020/04/02 10624 1380 1655 Use 100 clusters rather than 1000 to avoid timeouts // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -377,7 +384,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. 
- EXPECT_MEMORY_EQ(m_per_host, 1363); + EXPECT_MEMORY_EQ(m_per_host, 1380); EXPECT_MEMORY_LE(m_per_host, 1655); } diff --git a/test/integration/tcp_conn_pool_integration_test.cc b/test/integration/tcp_conn_pool_integration_test.cc index 52ba5cfe2193..03dbc5f9530b 100644 --- a/test/integration/tcp_conn_pool_integration_test.cc +++ b/test/integration/tcp_conn_pool_integration_test.cc @@ -112,13 +112,13 @@ class TcpConnPoolIntegrationTest : public testing::TestWithParammutable_typed_config()->PackFrom(access_log_config); + auto* runtime_filter = access_log->mutable_filter()->mutable_runtime_filter(); + runtime_filter->set_runtime_key("unused-key"); + auto* percent_sampled = runtime_filter->mutable_percent_sampled(); + percent_sampled->set_numerator(100); + percent_sampled->set_denominator( + envoy::type::FractionalPercent::DenominatorType::FractionalPercent_DenominatorType_HUNDRED); config_blob->PackFrom(tcp_proxy_config); }); initialize(); @@ -295,8 +301,9 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { // Test that all three addresses were populated correctly. Only check the first line of // log output for simplicity. EXPECT_THAT(log_result, - MatchesRegex(fmt::format("upstreamlocal={0} upstreamhost={0} downstream={1}\r?\n.*", - ip_port_regex, ip_regex))); + MatchesRegex(fmt::format( + "upstreamlocal={0} upstreamhost={0} downstream={1} sent=5 received=0\r?\n.*", + ip_port_regex, ip_regex))); } // Test that the server shuts down without crashing when connections are open. 
@@ -385,7 +392,7 @@ TEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { class TcpProxyMetadataMatchIntegrationTest : public TcpProxyIntegrationTest { public: - void initialize(); + void initialize() override; void expectEndpointToMatchRoute(); void expectEndpointNotToMatchRoute(); @@ -676,7 +683,7 @@ void TcpProxySslIntegrationTest::initialize() { context_manager_ = std::make_unique(timeSystem()); - payload_reader_.reset(new WaitForPayloadReader(*dispatcher_)); + payload_reader_ = std::make_shared(*dispatcher_); } void TcpProxySslIntegrationTest::setupConnections() { @@ -805,5 +812,4 @@ TEST_P(TcpProxySslIntegrationTest, UpstreamHalfClose) { ASSERT_TRUE(fake_upstream_connection_->waitForHalfClose()); } -} // namespace } // namespace Envoy diff --git a/test/integration/tcp_proxy_integration_test.h b/test/integration/tcp_proxy_integration_test.h index e5f675050122..6504befc3630 100644 --- a/test/integration/tcp_proxy_integration_test.h +++ b/test/integration/tcp_proxy_integration_test.h @@ -9,11 +9,11 @@ #include "gtest/gtest.h" namespace Envoy { -namespace { + class TcpProxyIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { public: - TcpProxyIntegrationTest() : BaseIntegrationTest(GetParam(), ConfigHelper::TCP_PROXY_CONFIG) { + TcpProxyIntegrationTest() : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) { enable_half_close_ = true; } @@ -42,5 +42,4 @@ class TcpProxySslIntegrationTest : public TcpProxyIntegrationTest { FakeRawConnectionPtr fake_upstream_connection_; }; -} // namespace } // namespace Envoy diff --git a/test/integration/transport_socket_match_integration_test.cc b/test/integration/transport_socket_match_integration_test.cc index afb4208c9738..e074af1a85d0 100644 --- a/test/integration/transport_socket_match_integration_test.cc +++ b/test/integration/transport_socket_match_integration_test.cc @@ -20,7 +20,7 @@ class TransportSockeMatchIntegrationTest : public testing::Test, public HttpInte 
TransportSockeMatchIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, TestEnvironment::getIpVersionsForTest().front(), - ConfigHelper::HTTP_PROXY_CONFIG), + ConfigHelper::httpProxyConfig()), num_hosts_{2} { autonomous_upstream_ = true; setUpstreamCount(num_hosts_); diff --git a/test/integration/uds_integration_test.cc b/test/integration/uds_integration_test.cc index ddb9abc68ad6..e1e63b8fbfb5 100644 --- a/test/integration/uds_integration_test.cc +++ b/test/integration/uds_integration_test.cc @@ -111,7 +111,7 @@ TEST_P(UdsListenerIntegrationTest, TestPeerCredentials) { TEST_P(UdsListenerIntegrationTest, RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = createConnectionFn(); - testRouterRequestAndResponseWithBody(1024, 512, false, &creator); + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); } TEST_P(UdsListenerIntegrationTest, RouterHeaderOnlyRequestAndResponse) { diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index 1048df5ce35f..828dd658f2c8 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -343,7 +343,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { // Add a small buffer filter to the standard HTTP filter chain. Websocket // upgrades will use the HTTP filter chain so will also have small buffers. - config_helper_.addFilter(ConfigHelper::SMALL_BUFFER_FILTER); + config_helper_.addFilter(ConfigHelper::smallBufferFilter()); // Add a second upgrade type which goes directly to the router filter. 
config_helper_.addConfigModifier( diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index f1618d3702e4..c52111296301 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -65,6 +65,9 @@ class MockOsSysCalls : public OsSysCallsImpl { MOCK_METHOD(SysCallSizeResult, readv, (os_fd_t, const iovec*, int)); MOCK_METHOD(SysCallSizeResult, recv, (os_fd_t socket, void* buffer, size_t length, int flags)); MOCK_METHOD(SysCallSizeResult, recvmsg, (os_fd_t socket, msghdr* msg, int flags)); + MOCK_METHOD(SysCallIntResult, recvmmsg, + (os_fd_t socket, struct mmsghdr* msgvec, unsigned int vlen, int flags, + struct timespec* timeout)); MOCK_METHOD(SysCallIntResult, ftruncate, (int fd, off_t length)); MOCK_METHOD(SysCallPtrResult, mmap, (void* addr, size_t length, int prot, int flags, int fd, off_t offset)); @@ -84,6 +87,7 @@ class MockOsSysCalls : public OsSysCallsImpl { MOCK_METHOD(SysCallIntResult, socketpair, (int domain, int type, int protocol, os_fd_t sv[2])); MOCK_METHOD(SysCallIntResult, listen, (os_fd_t sockfd, int backlog)); MOCK_METHOD(SysCallSizeResult, write, (os_fd_t sockfd, const void* buffer, size_t length)); + MOCK_METHOD(bool, supportsMmsg, (), (const)); // Map from (sockfd,level,optname) to boolean socket option. using SockOptKey = std::tuple; diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 08219ef98b7f..a9ca86d994de 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -34,9 +34,9 @@ class MockDispatcher : public Dispatcher { // Dispatcher TimeSource& timeSource() override { return time_system_; } - Network::ConnectionPtr - createServerConnection(Network::ConnectionSocketPtr&& socket, - Network::TransportSocketPtr&& transport_socket) override { + Network::ConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, + Network::TransportSocketPtr&& transport_socket, + StreamInfo::StreamInfo&) override { // The caller expects both the socket and the transport socket to be moved. 
socket.reset(); transport_socket.reset(); diff --git a/test/mocks/grpc/mocks.h b/test/mocks/grpc/mocks.h index d71bac5694c7..bbf8f6d1b60d 100644 --- a/test/mocks/grpc/mocks.h +++ b/test/mocks/grpc/mocks.h @@ -74,7 +74,7 @@ class MockAsyncStreamCallbacks : public AsyncStreamCallbacks { class MockAsyncClient : public RawAsyncClient { public: MockAsyncClient(); - ~MockAsyncClient(); + ~MockAsyncClient() override; MOCK_METHOD(AsyncRequest*, sendRaw, (absl::string_view service_full_name, absl::string_view method_name, diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index cc4fdbd2be46..0edb60a75980 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -233,6 +233,7 @@ class MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks, MOCK_METHOD(void, continueEncoding, ()); MOCK_METHOD(const Buffer::Instance*, encodingBuffer, ()); MOCK_METHOD(void, modifyEncodingBuffer, (std::function)); + MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); Buffer::InstancePtr buffer_; testing::NiceMock active_span_; @@ -334,11 +335,15 @@ class MockAsyncClientCallbacks : public AsyncClient::Callbacks { MockAsyncClientCallbacks(); ~MockAsyncClientCallbacks() override; - void onSuccess(ResponseMessagePtr&& response) override { onSuccess_(response.get()); } + void onSuccess(const Http::AsyncClient::Request& request, + ResponseMessagePtr&& response) override { + onSuccess_(request, response.get()); + } // Http::AsyncClient::Callbacks - MOCK_METHOD(void, onSuccess_, (ResponseMessage * response)); - MOCK_METHOD(void, onFailure, (Http::AsyncClient::FailureReason reason)); + MOCK_METHOD(void, onSuccess_, (const Http::AsyncClient::Request&, ResponseMessage*)); + MOCK_METHOD(void, onFailure, + (const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason)); }; class MockAsyncClientStreamCallbacks : public AsyncClient::StreamCallbacks { diff --git a/test/mocks/http/stream_decoder.h b/test/mocks/http/stream_decoder.h index 
689ccfa9fcf3..2abbe175aaa4 100644 --- a/test/mocks/http/stream_decoder.h +++ b/test/mocks/http/stream_decoder.h @@ -21,7 +21,7 @@ class MockStreamDecoder : public virtual StreamDecoder { class MockRequestDecoder : public MockStreamDecoder, public RequestDecoder { public: MockRequestDecoder(); - ~MockRequestDecoder(); + ~MockRequestDecoder() override; void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override { decodeHeaders_(headers, end_stream); @@ -36,7 +36,7 @@ class MockRequestDecoder : public MockStreamDecoder, public RequestDecoder { class MockResponseDecoder : public MockStreamDecoder, public ResponseDecoder { public: MockResponseDecoder(); - ~MockResponseDecoder(); + ~MockResponseDecoder() override; void decode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override { decode100ContinueHeaders_(headers); diff --git a/test/mocks/http/stream_encoder.cc b/test/mocks/http/stream_encoder.cc index 0268efa793b0..a76fffaace59 100644 --- a/test/mocks/http/stream_encoder.cc +++ b/test/mocks/http/stream_encoder.cc @@ -6,6 +6,9 @@ using testing::Invoke; namespace Envoy { namespace Http { +MockHttp1StreamEncoderOptions::MockHttp1StreamEncoderOptions() = default; +MockHttp1StreamEncoderOptions::~MockHttp1StreamEncoderOptions() = default; + MockStreamEncoder::MockStreamEncoder() { ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); } diff --git a/test/mocks/http/stream_encoder.h b/test/mocks/http/stream_encoder.h index f97546c67702..768951a411b4 100644 --- a/test/mocks/http/stream_encoder.h +++ b/test/mocks/http/stream_encoder.h @@ -9,6 +9,14 @@ namespace Envoy { namespace Http { +class MockHttp1StreamEncoderOptions : public Http1StreamEncoderOptions { +public: + MockHttp1StreamEncoderOptions(); + ~MockHttp1StreamEncoderOptions() override; + + MOCK_METHOD(void, disableChunkEncoding, ()); +}; + class MockStreamEncoder : public virtual StreamEncoder { public: MockStreamEncoder(); @@ -18,6 +26,7 @@ class MockStreamEncoder : public virtual 
StreamEncoder { MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector)); MOCK_METHOD(Stream&, getStream, ()); + MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); testing::NiceMock stream_; }; @@ -25,7 +34,7 @@ class MockStreamEncoder : public virtual StreamEncoder { class MockRequestEncoder : public MockStreamEncoder, public RequestEncoder { public: MockRequestEncoder(); - ~MockRequestEncoder(); + ~MockRequestEncoder() override; // Http::RequestEncoder MOCK_METHOD(void, encodeHeaders, (const RequestHeaderMap& headers, bool end_stream)); @@ -35,7 +44,7 @@ class MockRequestEncoder : public MockStreamEncoder, public RequestEncoder { class MockResponseEncoder : public MockStreamEncoder, public ResponseEncoder { public: MockResponseEncoder(); - ~MockResponseEncoder(); + ~MockResponseEncoder() override; // Http::ResponseEncoder MOCK_METHOD(void, encode100ContinueHeaders, (const ResponseHeaderMap& headers)); diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 330dad5436da..877861539c9d 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -26,6 +26,7 @@ envoy_cc_mock( hdrs = ["io_handle.h"], deps = [ "//include/envoy/network:io_handle_interface", + "//source/common/buffer:buffer_lib", ], ) diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index 3e6be1fe4216..787ea202f464 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/buffer/buffer.h" #include "envoy/network/io_handle.h" #include "gmock/gmock.h" @@ -10,7 +11,7 @@ namespace Network { class MockIoHandle : public IoHandle { public: MockIoHandle(); - ~MockIoHandle(); + ~MockIoHandle() override; MOCK_METHOD(int, fd, (), (const)); MOCK_METHOD(Api::IoCallUint64Result, close, ()); @@ -25,6 +26,9 @@ class MockIoHandle : public IoHandle { 
MOCK_METHOD(Api::IoCallUint64Result, recvmsg, (Buffer::RawSlice * slices, const uint64_t num_slice, uint32_t self_port, RecvMsgOutput& output)); + MOCK_METHOD(Api::IoCallUint64Result, recvmmsg, + (RawSliceArrays & slices, uint32_t self_port, RecvMsgOutput& output)); + MOCK_METHOD(bool, supportsMmsg, (), (const)); }; } // namespace Network diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index b0a47496cd11..81f9d03721c2 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -156,7 +156,7 @@ MockConnectionSocket::MockConnectionSocket() MockConnectionSocket::~MockConnectionSocket() = default; -MockListener::MockListener() {} +MockListener::MockListener() = default; MockListener::~MockListener() { onDestroy(); } @@ -195,5 +195,8 @@ MockUdpListenerFilterManager::~MockUdpListenerFilterManager() = default; MockConnectionBalancer::MockConnectionBalancer() = default; MockConnectionBalancer::~MockConnectionBalancer() = default; +MockListenerFilterMatcher::MockListenerFilterMatcher() = default; +MockListenerFilterMatcher::~MockListenerFilterMatcher() = default; + } // namespace Network } // namespace Envoy diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 131b6d220b02..3efea4912b35 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -165,19 +165,24 @@ class MockListenerFilterManager : public ListenerFilterManager { MockListenerFilterManager(); ~MockListenerFilterManager() override; - void addAcceptFilter(ListenerFilterPtr&& filter) override { addAcceptFilter_(filter); } + void addAcceptFilter(const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + ListenerFilterPtr&& filter) override { + addAcceptFilter_(listener_filter_matcher, filter); + } - MOCK_METHOD(void, addAcceptFilter_, (Network::ListenerFilterPtr&)); + MOCK_METHOD(void, addAcceptFilter_, + (const Network::ListenerFilterMatcherSharedPtr&, Network::ListenerFilterPtr&)); }; -class MockFilterChain : public 
FilterChain { +class MockFilterChain : public DrainableFilterChain { public: MockFilterChain(); ~MockFilterChain() override; - // Network::FilterChain + // Network::DrainableFilterChain MOCK_METHOD(const TransportSocketFactory&, transportSocketFactory, (), (const)); MOCK_METHOD(const std::vector&, networkFilterFactories, (), (const)); + MOCK_METHOD(void, startDraining, ()); }; class MockFilterChainManager : public FilterChainManager { @@ -320,11 +325,16 @@ class MockListenerConfig : public ListenerConfig { return envoy::config::core::v3::UNSPECIFIED; } + const std::vector& accessLogs() const override { + return empty_access_logs_; + } + testing::NiceMock filter_chain_factory_; MockListenSocketFactory socket_factory_; SocketSharedPtr socket_; Stats::IsolatedStoreImpl scope_; std::string name_; + const std::vector empty_access_logs_; }; class MockListener : public Listener { @@ -464,5 +474,11 @@ class MockConnectionBalancer : public ConnectionBalancer { (BalancedConnectionHandler & current_handler)); }; +class MockListenerFilterMatcher : public ListenerFilterMatcher { +public: + MockListenerFilterMatcher(); + ~MockListenerFilterMatcher() override; + MOCK_METHOD(bool, matches, (Network::ListenerFilterCallbacks & cb), (const)); +}; } // namespace Network } // namespace Envoy diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index b5ef51ca4dbc..9ed7b8ead74b 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -225,9 +225,12 @@ class TestVirtualCluster : public VirtualCluster { public: // Router::VirtualCluster Stats::StatName statName() const override { return stat_name_.statName(); } + VirtualClusterStats& stats() const override { return stats_; } Stats::TestSymbolTable symbol_table_; Stats::StatNameManagedStorage stat_name_{"fake_virtual_cluster", *symbol_table_}; + Stats::IsolatedStoreImpl stats_store_; + mutable VirtualClusterStats stats_{generateStats(stats_store_)}; }; class MockVirtualHost : public VirtualHost { diff --git 
a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index 52d822dd4746..f003adf48361 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -213,6 +213,7 @@ MockServerFactoryContext::MockServerFactoryContext() ON_CALL(*this, messageValidationVisitor()) .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); } MockServerFactoryContext::~MockServerFactoryContext() = default; diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index f05c200064dd..57515cf77fce 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -174,6 +174,7 @@ class MockAdminStream : public AdminStream { MOCK_METHOD(Http::RequestHeaderMap&, getRequestHeaders, (), (const)); MOCK_METHOD(NiceMock&, getDecoderFilterCallbacks, (), (const)); + MOCK_METHOD(Http::Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); }; class MockDrainManager : public DrainManager { @@ -494,6 +495,7 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); MOCK_METHOD(Api::Api&, api, ()); Grpc::Context& grpcContext() override { return grpc_context_; } + MOCK_METHOD(Server::DrainManager&, drainManager, ()); testing::NiceMock cluster_manager_; testing::NiceMock dispatcher_; diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index a4af7efe125a..3aed0577db6e 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -111,6 +111,7 @@ class MockServerContextConfig : public ServerContextConfig { MOCK_METHOD(bool, requireClientCertificate, (), (const)); MOCK_METHOD(const std::vector&, sessionTicketKeys, (), (const)); + MOCK_METHOD(bool, disableStatelessSessionResumption, (), (const)); }; class MockPrivateKeyMethodManager : public PrivateKeyMethodManager { diff --git a/test/mocks/stats/mocks.cc 
b/test/mocks/stats/mocks.cc index d706e87fc778..595b2bd779e2 100644 --- a/test/mocks/stats/mocks.cc +++ b/test/mocks/stats/mocks.cc @@ -64,7 +64,7 @@ MockMetricSnapshot::~MockMetricSnapshot() = default; MockSink::MockSink() = default; MockSink::~MockSink() = default; -MockStore::MockStore() : StoreImpl(*global_symbol_table_) { +MockStore::MockStore() : TestUtil::TestStore(*global_symbol_table_) { ON_CALL(*this, counter(_)).WillByDefault(ReturnRef(counter_)); ON_CALL(*this, histogram(_, _)) .WillByDefault(Invoke([this](const std::string& name, Histogram::Unit unit) -> Histogram& { diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 9fa00ac5a266..e4336af3069a 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -273,7 +273,7 @@ class SymbolTableProvider { TestSymbolTable global_symbol_table_; }; -class MockStore : public SymbolTableProvider, public StoreImpl { +class MockStore : public SymbolTableProvider, public TestUtil::TestStore { public: MockStore(); ~MockStore() override; diff --git a/test/mocks/stream_info/BUILD b/test/mocks/stream_info/BUILD index c6d56c5ac654..6d33901f6cf1 100644 --- a/test/mocks/stream_info/BUILD +++ b/test/mocks/stream_info/BUILD @@ -13,6 +13,7 @@ envoy_cc_mock( srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ + "//include/envoy/http:request_id_extension_interface", "//include/envoy/stream_info:stream_info_interface", "//include/envoy/upstream:upstream_interface", "//test/mocks/upstream:host_mocks", diff --git a/test/mocks/stream_info/mocks.cc b/test/mocks/stream_info/mocks.cc index 59219bffcc18..79cb4f41763f 100644 --- a/test/mocks/stream_info/mocks.cc +++ b/test/mocks/stream_info/mocks.cc @@ -24,6 +24,9 @@ MockStreamInfo::MockStreamInfo() ON_CALL(*this, setResponseFlag(_)).WillByDefault(Invoke([this](ResponseFlag response_flag) { response_flags_ |= response_flag; })); + ON_CALL(*this, setResponseCodeDetails(_)).WillByDefault(Invoke([this](absl::string_view details) { + response_code_details_ = 
std::string(details); + })); ON_CALL(*this, startTime()).WillByDefault(ReturnPointee(&start_time_)); ON_CALL(*this, startTimeMonotonic()).WillByDefault(ReturnPointee(&start_time_monotonic_)); ON_CALL(*this, lastDownstreamRxByteReceived()) diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index 4ca1498133ff..2c5b09562e96 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/core/v3/base.pb.h" +#include "envoy/http/request_id_extension.h" #include "envoy/stream_info/stream_info.h" #include "common/stream_info/filter_state_impl.h" @@ -92,6 +93,8 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(void, setUpstreamClusterInfo, (const Upstream::ClusterInfoConstSharedPtr&)); MOCK_METHOD(absl::optional, upstreamClusterInfo, (), (const)); + MOCK_METHOD(Http::RequestIDExtensionSharedPtr, getRequestIDExtension, (), (const)); + MOCK_METHOD(void, setRequestIDExtension, (Http::RequestIDExtensionSharedPtr)); std::shared_ptr> host_{ new testing::NiceMock()}; diff --git a/test/mocks/tcp/mocks.cc b/test/mocks/tcp/mocks.cc index be7f9046fdb1..8d86a1f204a0 100644 --- a/test/mocks/tcp/mocks.cc +++ b/test/mocks/tcp/mocks.cc @@ -29,6 +29,7 @@ MockInstance::MockInstance() { ON_CALL(*this, newConnection(_)).WillByDefault(Invoke([&](Callbacks& cb) -> Cancellable* { return newConnectionImpl(cb); })); + ON_CALL(*this, host()).WillByDefault(Return(host_)); } MockInstance::~MockInstance() = default; diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index 4c2a23e50030..74f5c8f85f24 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -63,6 +63,7 @@ class MockInstance : public Instance { MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb)); MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); + MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); MockCancellable* 
newConnectionImpl(Callbacks& cb); void poolFailure(PoolFailureReason reason); diff --git a/test/mocks/tracing/mocks.cc b/test/mocks/tracing/mocks.cc index 6b75d70431f7..22c42b78d21d 100644 --- a/test/mocks/tracing/mocks.cc +++ b/test/mocks/tracing/mocks.cc @@ -4,7 +4,6 @@ #include "gtest/gtest.h" using testing::Return; -using testing::ReturnRef; namespace Envoy { namespace Tracing { diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index b63356ada99a..0316440e5389 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -91,6 +91,7 @@ class MockHostDescription : public HostDescription { MOCK_METHOD(const ClusterInfo&, cluster, (), (const)); MOCK_METHOD(Outlier::DetectorHostMonitor&, outlierDetector, (), (const)); MOCK_METHOD(HealthCheckHostMonitor&, healthChecker, (), (const)); + MOCK_METHOD(const std::string&, hostnameForHealthChecks, (), (const)); MOCK_METHOD(const std::string&, hostname, (), (const)); MOCK_METHOD(Network::TransportSocketFactory&, transportSocketFactory, (), (const)); MOCK_METHOD(HostStats&, stats, (), (const)); @@ -175,6 +176,7 @@ class MockHost : public Host { MOCK_METHOD(void, healthFlagSet, (HealthFlag flag)); MOCK_METHOD(void, setActiveHealthFailureType, (ActiveHealthFailureType type)); MOCK_METHOD(Host::Health, health, (), (const)); + MOCK_METHOD(const std::string&, hostnameForHealthChecks, (), (const)); MOCK_METHOD(const std::string&, hostname, (), (const)); MOCK_METHOD(Network::TransportSocketFactory&, transportSocketFactory, (), (const)); MOCK_METHOD(Outlier::DetectorHostMonitor&, outlierDetector, (), (const)); diff --git a/test/server/BUILD b/test/server/BUILD index d409bcf0f2f0..3611bf168841 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -2,9 +2,10 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", 
"envoy_select_hot_restart", @@ -396,7 +397,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "filter_chain_benchmark_test", srcs = ["filter_chain_benchmark_test.cc"], external_deps = [ @@ -413,3 +414,9 @@ envoy_cc_test_binary( "//source/extensions/transport_sockets/tls:config", ], ) + +envoy_benchmark_test( + name = "filter_chain_benchmark_test_benchmark_test", + timeout = "long", + benchmark_binary = "filter_chain_benchmark_test", +) diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index 0c556cce1df6..98def0389851 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -20,7 +20,9 @@ class ValidationServerTest : public testing::TestWithParam { directory_ = TestEnvironment::temporaryDirectory() + "/test/config_test/"; } - static void SetUpTestSuite() { SetupTestDirectory(); } + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) + SetupTestDirectory(); + } protected: ValidationServerTest() : options_(directory_ + GetParam()) {} diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 08ae4e204804..06676a01e987 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -22,12 +22,10 @@ #include "gtest/gtest.h" using testing::_; -using testing::ByRef; using testing::HasSubstr; using testing::InSequence; using testing::Invoke; using testing::NiceMock; -using testing::Ref; using testing::Return; using testing::ReturnRef; using testing::SaveArg; @@ -41,7 +39,10 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable()), handler_(new ConnectionHandlerImpl(dispatcher_, "test")), - filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) {} + filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), + listener_filter_matcher_(std::make_shared>()) { + 
ON_CALL(*listener_filter_matcher_, matches(_)).WillByDefault(Return(false)); + } class TestListener : public Network::ListenerConfig { public: @@ -91,6 +92,9 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable& accessLogs() const override { + return empty_access_logs_; + } ConnectionHandlerTest& parent_; std::shared_ptr socket_; @@ -103,6 +107,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable udp_listener_factory_; Network::ConnectionBalancerPtr connection_balancer_; + const std::vector empty_access_logs_; }; using TestListenerPtr = std::unique_ptr; @@ -148,7 +153,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable socket_factory_; Network::Address::InstanceConstSharedPtr local_address_{ new Network::Address::Ipv4Instance("127.0.0.1", 10001)}; @@ -160,6 +165,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable os_sys_calls_; TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; + std::shared_ptr> listener_filter_matcher_; }; // Verify that if a listener is removed while a rebalanced connection is in flight, we correctly @@ -360,7 +366,7 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { // Insert the Mock filter. if (!redirected) { - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(nullptr, Network::ListenerFilterPtr{test_filter}); redirected = true; } return true; @@ -421,7 +427,8 @@ TEST_F(ConnectionHandlerTest, FallbackToWildcardListener) { .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { // Insert the Mock filter. 
if (!redirected) { - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, + Network::ListenerFilterPtr{test_filter}); redirected = true; } return true; @@ -468,7 +475,7 @@ TEST_F(ConnectionHandlerTest, WildcardListenerWithOriginalDst) { EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { // Insert the Mock filter. - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); EXPECT_CALL(*test_filter, onAccept(_)) @@ -509,7 +516,7 @@ TEST_F(ConnectionHandlerTest, WildcardListenerWithNoOriginalDst) { EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { // Insert the Mock filter. - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); EXPECT_CALL(*test_filter, onAccept(_)).WillOnce(Return(Network::FilterStatus::Continue)); @@ -555,7 +562,7 @@ TEST_F(ConnectionHandlerTest, TransportProtocolCustom) { EXPECT_CALL(*test_filter, destroy_()); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); absl::string_view dummy = "dummy"; @@ -587,7 +594,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeout) { Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { - 
manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); EXPECT_CALL(*test_filter, onAccept(_)) @@ -632,7 +639,7 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { Network::MockListenerFilter* test_filter = new NiceMock(); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); EXPECT_CALL(*test_filter, onAccept(_)) @@ -677,7 +684,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeoutResetOnSuccess) { Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); Network::ListenerFilterCallbacks* listener_filter_cb{}; @@ -716,7 +723,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) { Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); EXPECT_CALL(factory_, createListenerFilterChain(_)) .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { - manager.addAcceptFilter(Network::ListenerFilterPtr{test_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); return true; })); EXPECT_CALL(*test_filter, onAccept(_)) @@ -746,8 +753,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterReportError) { Network::MockListenerFilter* last_filter = new Network::MockListenerFilter(); EXPECT_CALL(factory_, createListenerFilterChain(_)) 
.WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { - manager.addAcceptFilter(Network::ListenerFilterPtr{first_filter}); - manager.addAcceptFilter(Network::ListenerFilterPtr{last_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{first_filter}); + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{last_filter}); return true; })); // The first filter close the socket @@ -796,6 +803,39 @@ TEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) { } } +// Listener Filter matchers works. +TEST_F(ConnectionHandlerTest, ListenerFilterWorks) { + Network::ListenerCallbacks* listener_callbacks; + auto listener = new NiceMock(); + TestListener* test_listener = + addListener(1, true, false, "test_listener", listener, &listener_callbacks); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + handler_->addListener(*test_listener); + + auto all_matcher = std::make_shared(); + auto* disabled_listener_filter = new Network::MockListenerFilter(); + auto* enabled_filter = new Network::MockListenerFilter(); + EXPECT_CALL(factory_, createListenerFilterChain(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { + manager.addAcceptFilter(all_matcher, Network::ListenerFilterPtr{disabled_listener_filter}); + manager.addAcceptFilter(listener_filter_matcher_, + Network::ListenerFilterPtr{enabled_filter}); + return true; + })); + + // The all matcher matches any incoming traffic and disables the listener filter. + EXPECT_CALL(*all_matcher, matches(_)).WillOnce(Return(true)); + EXPECT_CALL(*disabled_listener_filter, onAccept(_)).Times(0); + + // The non matcher acts as if always enabled. 
+ EXPECT_CALL(*enabled_filter, onAccept(_)).WillOnce(Return(Network::FilterStatus::Continue)); + EXPECT_CALL(*disabled_listener_filter, destroy_()); + EXPECT_CALL(*enabled_filter, destroy_()); + EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(nullptr)); + listener_callbacks->onAccept(std::make_unique>()); + EXPECT_CALL(*listener, onDestroy()); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 04e644c07655..d099baf1e480 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -28,11 +28,11 @@ namespace Server { namespace { class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { - std::unique_ptr + std::shared_ptr buildFilterChain(const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&) const override { // A place holder to be found - return std::make_unique(); + return std::make_shared(); } }; @@ -166,7 +166,6 @@ const char YamlSingleDstPortBottom[] = R"EOF( class FilterChainBenchmarkFixture : public benchmark::Fixture { public: - using Fixture::SetUp; void SetUp(const ::benchmark::State& state) override { int64_t input_size = state.range(0); std::vector port_chains; @@ -180,21 +179,21 @@ class FilterChainBenchmarkFixture : public benchmark::Fixture { TestUtility::loadFromYaml(listener_yaml_config_, listener_config_); filter_chains_ = listener_config_.filter_chains(); } - absl::Span filter_chains_; std::string listener_yaml_config_; envoy::config::listener::v3::Listener listener_config_; + absl::Span filter_chains_; MockFilterChainFactoryBuilder dummy_builder_; - NiceMock factory_context; - FilterChainManagerImpl filter_chain_manager_{ - std::make_shared("127.0.0.1", 1234), factory_context}; + Init::ManagerImpl init_manager_{"fcm_benchmark"}; }; +// NOLINTNEXTLINE(readability-redundant-member-init) BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, 
FilterChainManagerBuildTest) (::benchmark::State& state) { NiceMock factory_context; for (auto _ : state) { FilterChainManagerImpl filter_chain_manager{ - std::make_shared("127.0.0.1", 1234), factory_context}; + std::make_shared("127.0.0.1", 1234), factory_context, + init_manager_}; filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager); } } @@ -209,7 +208,8 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainFindTest) } NiceMock factory_context; FilterChainManagerImpl filter_chain_manager{ - std::make_shared("127.0.0.1", 1234), factory_context}; + std::make_shared("127.0.0.1", 1234), factory_context, + init_manager_}; filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager); for (auto _ : state) { @@ -242,19 +242,18 @@ Load Average: 19.05, 9.89, 3.92 ------------------------------------------------------------------------------------------------------- Benchmark Time CPU Iterations ------------------------------------------------------------------------------------------------------- -FilterChainBenchmarkFixture/FilterChainManagerBuildTest/1 51002 ns 50998 ns 12033 -FilterChainBenchmarkFixture/FilterChainManagerBuildTest/8 205175 ns 205161 ns 3782 -FilterChainBenchmarkFixture/FilterChainManagerBuildTest/64 1400449 ns 1400328 ns 485 -FilterChainBenchmarkFixture/FilterChainManagerBuildTest/512 10488106 ns 10485949 ns 62 -FilterChainBenchmarkFixture/FilterChainManagerBuildTest/4096 118373326 ns 117786871 ns 7 -FilterChainBenchmarkFixture/FilterChainFindTest/1 209 ns 209 ns 3257004 -FilterChainBenchmarkFixture/FilterChainFindTest/8 1780 ns 1780 ns 391501 -FilterChainBenchmarkFixture/FilterChainFindTest/64 16707 ns 16705 ns 42110 -FilterChainBenchmarkFixture/FilterChainFindTest/512 150220 ns 150072 ns 4675 -FilterChainBenchmarkFixture/FilterChainFindTest/4096 2227852 ns 2227703 ns 320 +FilterChainBenchmarkFixture/FilterChainManagerBuildTest/1 136994 ns 134510 ns 5183 
+FilterChainBenchmarkFixture/FilterChainManagerBuildTest/8 583649 ns 574596 ns 1207 +FilterChainBenchmarkFixture/FilterChainManagerBuildTest/64 4483799 ns 4419618 ns 157 +FilterChainBenchmarkFixture/FilterChainManagerBuildTest/512 38864048 ns 38340468 ns 19 +FilterChainBenchmarkFixture/FilterChainManagerBuildTest/4096 318686843 ns 318568578 ns 2 +FilterChainBenchmarkFixture/FilterChainFindTest/1 201 ns 201 ns 3494470 +FilterChainBenchmarkFixture/FilterChainFindTest/8 1592 ns 1592 ns 435045 +FilterChainBenchmarkFixture/FilterChainFindTest/64 16057 ns 16053 ns 44275 +FilterChainBenchmarkFixture/FilterChainFindTest/512 172423 ns 172269 ns 4253 +FilterChainBenchmarkFixture/FilterChainFindTest/4096 2676478 ns 2676167 ns 254 clang-format on */ } // namespace Server } // namespace Envoy -BENCHMARK_MAIN(); diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index 6f5854518b74..85a67482abfc 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -45,12 +45,15 @@ namespace Envoy { namespace Server { class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { - std::unique_ptr - buildFilterChain(const envoy::config::listener::v3::FilterChain&, - FilterChainFactoryContextCreator&) const override { - // Won't dereference but requires not nullptr. 
- return std::make_unique(); +public: + MockFilterChainFactoryBuilder() { + ON_CALL(*this, buildFilterChain(_, _)) + .WillByDefault(Return(std::make_shared())); } + + MOCK_METHOD(std::shared_ptr, buildFilterChain, + (const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&), + (const)); }; class FilterChainManagerImplTest : public testing::Test { @@ -122,13 +125,14 @@ class FilterChainManagerImplTest : public testing::Test { keys: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a" )EOF"; + Init::ManagerImpl init_manager_{"for_filter_chain_manager_test"}; envoy::config::listener::v3::FilterChain filter_chain_template_; - MockFilterChainFactoryBuilder filter_chain_factory_builder_; + NiceMock filter_chain_factory_builder_; NiceMock parent_context_; - // Test target. FilterChainManagerImpl filter_chain_manager_{ - std::make_shared("127.0.0.1", 1234), parent_context_}; + std::make_shared("127.0.0.1", 1234), parent_context_, + init_manager_}; }; TEST_F(FilterChainManagerImplTest, FilterChainMatchNothing) { @@ -142,17 +146,80 @@ TEST_F(FilterChainManagerImplTest, AddSingleFilterChain) { EXPECT_NE(filter_chain, nullptr); } -// The current implementation generates independent contexts for the same filter chain -TEST_F(FilterChainManagerImplTest, FilterChainContextsAreUnique) { - std::set contexts; - { - for (int i = 0; i < 2; i++) { - contexts.insert( - &filter_chain_manager_.createFilterChainFactoryContext(&filter_chain_template_)); - } +TEST_F(FilterChainManagerImplTest, LookupFilterChainContextByFilterChainMessage) { + std::vector filter_chain_messages; + + for (int i = 0; i < 2; i++) { + envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_; + new_filter_chain.set_name(absl::StrCat("filter_chain_", i)); + // For sanity check + new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i); + 
filter_chain_messages.push_back(std::move(new_filter_chain)); + } + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2); + filter_chain_manager_.addFilterChain( + std::vector{&filter_chain_messages[0], + &filter_chain_messages[1]}, + filter_chain_factory_builder_, filter_chain_manager_); +} + +TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { + std::vector filter_chain_messages; + + for (int i = 0; i < 3; i++) { + envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_; + new_filter_chain.set_name(absl::StrCat("filter_chain_", i)); + // For sanity check + new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i); + filter_chain_messages.push_back(std::move(new_filter_chain)); } - EXPECT_EQ(contexts.size(), 2); + + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(1); + filter_chain_manager_.addFilterChain( + std::vector{&filter_chain_messages[0]}, + filter_chain_factory_builder_, filter_chain_manager_); + + FilterChainManagerImpl new_filter_chain_manager{ + std::make_shared("127.0.0.1", 1234), parent_context_, + init_manager_, filter_chain_manager_}; + // The new filter chain manager maintains 3 filter chains, but only 2 filter chain context is + // built because it reuse the filter chain context in the previous filter chain manager + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2); + new_filter_chain_manager.addFilterChain( + std::vector{ + &filter_chain_messages[0], &filter_chain_messages[1], &filter_chain_messages[2]}, + filter_chain_factory_builder_, new_filter_chain_manager); } +TEST_F(FilterChainManagerImplTest, CreatedFilterChainFactoryContextHasIndependentDrainClose) { + std::vector filter_chain_messages; + for (int i = 0; i < 3; i++) { + envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_; + new_filter_chain.set_name(absl::StrCat("filter_chain_", i)); + // For sanity 
check + new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i); + filter_chain_messages.push_back(std::move(new_filter_chain)); + } + auto context0 = filter_chain_manager_.createFilterChainFactoryContext(&filter_chain_messages[0]); + auto context1 = filter_chain_manager_.createFilterChainFactoryContext(&filter_chain_messages[1]); + + // Server as whole is not draining. + MockDrainManager not_a_draining_manager; + EXPECT_CALL(not_a_draining_manager, drainClose).WillRepeatedly(Return(false)); + Configuration::MockServerFactoryContext mock_server_context; + EXPECT_CALL(mock_server_context, drainManager).WillRepeatedly(ReturnRef(not_a_draining_manager)); + EXPECT_CALL(parent_context_, getServerFactoryContext) + .WillRepeatedly(ReturnRef(mock_server_context)); + + EXPECT_FALSE(context0->drainDecision().drainClose()); + EXPECT_FALSE(context1->drainDecision().drainClose()); + + // Drain filter chain 0 + auto* context_impl_0 = dynamic_cast(context0.get()); + context_impl_0->startDraining(); + + EXPECT_TRUE(context0->drainDecision().drainClose()); + EXPECT_FALSE(context1->drainDecision().drainClose()); +} } // namespace Server } // namespace Envoy diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 2f30af4894c1..f145c5ddd104 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -71,7 +71,7 @@ class GuardDogTestBase : public testing::TestWithParam { } std::unique_ptr time_system_; - Stats::IsolatedStoreImpl stats_store_; + Stats::TestUtil::TestStore stats_store_; Api::ApiPtr api_; std::unique_ptr guard_dog_; }; diff --git a/test/server/hot_restarting_parent_test.cc b/test/server/hot_restarting_parent_test.cc index 2c17d5a2ffc0..e178bb9b2ff3 100644 --- a/test/server/hot_restarting_parent_test.cc +++ b/test/server/hot_restarting_parent_test.cc @@ -62,7 +62,7 @@ TEST_F(HotRestartingParentTest, GetListenSocketsForChildNotBindPort) { } TEST_F(HotRestartingParentTest, 
ExportStatsToChild) { - Stats::IsolatedStoreImpl store; + Stats::TestUtil::TestStore store; MockListenerManager listener_manager; EXPECT_CALL(server_, listenerManager()).WillRepeatedly(ReturnRef(listener_manager)); EXPECT_CALL(listener_manager, numConnections()).WillRepeatedly(Return(0)); @@ -114,7 +114,7 @@ TEST_F(HotRestartingParentTest, ExportStatsToChild) { TEST_F(HotRestartingParentTest, RetainDynamicStats) { MockListenerManager listener_manager; Stats::SymbolTableImpl parent_symbol_table; - Stats::IsolatedStoreImpl parent_store(parent_symbol_table); + Stats::TestUtil::TestStore parent_store(parent_symbol_table); EXPECT_CALL(server_, listenerManager()).WillRepeatedly(ReturnRef(listener_manager)); EXPECT_CALL(listener_manager, numConnections()).WillRepeatedly(Return(0)); @@ -132,7 +132,7 @@ TEST_F(HotRestartingParentTest, RetainDynamicStats) { { Stats::SymbolTableImpl child_symbol_table; - Stats::IsolatedStoreImpl child_store(child_symbol_table); + Stats::TestUtil::TestStore child_store(child_symbol_table); Stats::StatNameDynamicPool dynamic(child_store.symbolTable()); Stats::Counter& c1 = child_store.counter("c1"); Stats::Counter& c2 = child_store.counterFromStatName(dynamic.add("c2")); diff --git a/test/server/http/BUILD b/test/server/http/BUILD index 51fa052362b6..406ba76e0c6e 100644 --- a/test/server/http/BUILD +++ b/test/server/http/BUILD @@ -36,6 +36,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "admin_filter_test", + srcs = ["admin_filter_test.cc"], + deps = [ + "//source/server/http:admin_filter_lib", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + ], +) + envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], diff --git a/test/server/http/admin_filter_test.cc b/test/server/http/admin_filter_test.cc new file mode 100644 index 000000000000..7dad3e63d3f4 --- /dev/null +++ b/test/server/http/admin_filter_test.cc @@ -0,0 +1,79 @@ +#include "server/http/admin_filter.h" + +#include 
"test/mocks/server/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::InSequence; +using testing::NiceMock; + +namespace Envoy { +namespace Server { + +class AdminFilterTest : public testing::TestWithParam { +public: + AdminFilterTest() : filter_(adminServerCallback), request_headers_{{":path", "/"}} { + filter_.setDecoderFilterCallbacks(callbacks_); + } + + NiceMock server_; + Stats::IsolatedStoreImpl listener_scope_; + AdminFilter filter_; + NiceMock callbacks_; + Http::TestRequestHeaderMapImpl request_headers_; + + static Http::Code adminServerCallback(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::OwnedImpl& response, AdminFilter& filter) { + // silence compiler warnings for unused params + UNREFERENCED_PARAMETER(path_and_query); + UNREFERENCED_PARAMETER(response_headers); + UNREFERENCED_PARAMETER(filter); + + response.add("OK\n"); + return Http::Code::OK; + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminFilterTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminFilterTest, HeaderOnly) { + EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.decodeHeaders(request_headers_, true)); +} + +TEST_P(AdminFilterTest, Body) { + InSequence s; + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.decodeHeaders(request_headers_, false)); + Buffer::OwnedImpl data("hello"); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map)); + EXPECT_CALL(callbacks_, addDecodedData(_, false)); + EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(data, true)); +} + +TEST_P(AdminFilterTest, Trailers) { + InSequence s; + + 
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.decodeHeaders(request_headers_, false)); + Buffer::OwnedImpl data("hello"); + EXPECT_CALL(callbacks_, addDecodedData(_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(data, false)); + EXPECT_CALL(callbacks_, decodingBuffer()); + filter_.getRequestBody(); + EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); + Http::TestRequestTrailerMapImpl request_trailers; + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_.decodeTrailers(request_trailers)); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index a0da6e7d6dff..378756a6ca20 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -23,6 +23,7 @@ #include "common/stats/thread_local_store.h" #include "server/http/admin.h" +#include "server/http/admin_filter.h" #include "extensions/transport_sockets/tls/context_config_impl.h" @@ -39,7 +40,6 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::_; using testing::AllOf; using testing::EndsWith; using testing::Ge; @@ -81,33 +81,6 @@ class AdminStatsTest : public testing::TestWithParam store_; }; -class AdminFilterTest : public testing::TestWithParam { -public: - AdminFilterTest() - : admin_(TestEnvironment::temporaryPath("envoy.prof"), server_), - filter_(admin_), request_headers_{{":path", "/"}} { - filter_.setDecoderFilterCallbacks(callbacks_); - } - - NiceMock server_; - Stats::IsolatedStoreImpl listener_scope_; - AdminImpl admin_; - AdminFilter filter_; - NiceMock callbacks_; - Http::TestRequestHeaderMapImpl request_headers_; -}; - -// Check default implementations the admin class picks up. 
-TEST_P(AdminFilterTest, MiscFunctions) { - EXPECT_EQ(false, admin_.preserveExternalRequestId()); - Http::MockFilterChainFactoryCallbacks mock_filter_chain_factory_callbacks; - EXPECT_EQ(false, - admin_.createUpgradeFilterChain("", nullptr, mock_filter_chain_factory_callbacks)); - EXPECT_TRUE(nullptr != admin_.scopedRouteConfigProvider()); - EXPECT_EQ(Http::ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE, - admin_.serverHeaderTransformation()); -} - INSTANTIATE_TEST_SUITE_P(IpVersions, AdminStatsTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -116,8 +89,8 @@ TEST_P(AdminStatsTest, StatsAsJson) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - Stats::Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Stats::Histogram& h2 = store_->histogram("h2", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200)); h1.recordValue(200); @@ -262,8 +235,8 @@ TEST_P(AdminStatsTest, UsedOnlyStatsAsJson) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - Stats::Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - Stats::Histogram& h2 = store_->histogram("h2", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ("h1", h1.name()); EXPECT_EQ("h2", h2.name()); @@ -360,8 +333,8 @@ TEST_P(AdminStatsTest, StatsAsJsonFilterString) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - Stats::Histogram& h1 = store_->histogram("h1", Stats::Histogram::Unit::Unspecified); - 
Stats::Histogram& h2 = store_->histogram("h2", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h1 = store_->histogramFromString("h1", Stats::Histogram::Unit::Unspecified); + Stats::Histogram& h2 = store_->histogramFromString("h2", Stats::Histogram::Unit::Unspecified); EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200)); h1.recordValue(200); @@ -460,11 +433,11 @@ TEST_P(AdminStatsTest, UsedOnlyStatsAsJsonFilterString) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - Stats::Histogram& h1 = store_->histogram( + Stats::Histogram& h1 = store_->histogramFromString( "h1_matches", Stats::Histogram::Unit::Unspecified); // Will match, be used, and print - Stats::Histogram& h2 = store_->histogram( + Stats::Histogram& h2 = store_->histogramFromString( "h2_matches", Stats::Histogram::Unit::Unspecified); // Will match but not be used - Stats::Histogram& h3 = store_->histogram( + Stats::Histogram& h3 = store_->histogramFromString( "h3_not", Stats::Histogram::Unit::Unspecified); // Will be used but not match EXPECT_EQ("h1_matches", h1.name()); @@ -565,51 +538,13 @@ TEST_P(AdminStatsTest, UsedOnlyStatsAsJsonFilterString) { store_->shutdownThreading(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, AdminFilterTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - -TEST_P(AdminFilterTest, HeaderOnly) { - EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, - filter_.decodeHeaders(request_headers_, true)); -} - -TEST_P(AdminFilterTest, Body) { - InSequence s; - - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, - filter_.decodeHeaders(request_headers_, false)); - Buffer::OwnedImpl data("hello"); - Http::MetadataMap metadata_map{{"metadata", "metadata"}}; - EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map)); - EXPECT_CALL(callbacks_, addDecodedData(_, false)); - EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); 
- EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(data, true)); -} - -TEST_P(AdminFilterTest, Trailers) { - InSequence s; - - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, - filter_.decodeHeaders(request_headers_, false)); - Buffer::OwnedImpl data("hello"); - EXPECT_CALL(callbacks_, addDecodedData(_, false)); - EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_.decodeData(data, false)); - EXPECT_CALL(callbacks_, decodingBuffer()); - filter_.getRequestBody(); - EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); - Http::TestRequestTrailerMapImpl request_trailers; - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_.decodeTrailers(request_trailers)); -} - class AdminInstanceTest : public testing::TestWithParam { public: AdminInstanceTest() : address_out_path_(TestEnvironment::temporaryPath("admin.address")), cpu_profile_path_(TestEnvironment::temporaryPath("envoy.prof")), admin_(cpu_profile_path_, server_), request_headers_{{":path", "/"}}, - admin_filter_(admin_) { + admin_filter_(admin_.createCallbackFunction()) { admin_.startHttpListener("/dev/null", address_out_path_, Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, listener_scope_.createScope("listener.admin.")); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 9dd43cabf762..0fcc9dfcdef3 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -440,7 +440,7 @@ class TestStatsConfigFactory : public Configuration::NamedNetworkFilterConfigFac private: Network::FilterFactoryCb commonFilterFactory(Configuration::FactoryContext& context) { - context.scope().counter("bar").inc(); + context.scope().counterFromString("bar").inc(); return [](Network::FilterManager&) -> void {}; } }; @@ -464,10 +464,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) { EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {false})); 
manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); - manager_->listeners().front().get().listenerScope().counter("foo").inc(); + manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); - EXPECT_EQ(1UL, server_.stats_store_.counter("bar").value()); - EXPECT_EQ(1UL, server_.stats_store_.counter("listener.127.0.0.1_1234.foo").value()); + EXPECT_EQ(1UL, server_.stats_store_.counterFromString("bar").value()); + EXPECT_EQ(1UL, server_.stats_store_.counterFromString("listener.127.0.0.1_1234.foo").value()); } TEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) { @@ -1569,7 +1569,7 @@ traffic_direction: OUTBOUND EXPECT_CALL(*worker_, stopListener(_, _)).Times(1); EXPECT_CALL(*listener_factory_.socket_, close()).Times(1); manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly); - EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_stopped").value()); + EXPECT_EQ(1, server_.stats_store_.counterFromString("listener_manager.listener_stopped").value()); // Validate that listener creation in outbound direction is allowed. const std::string listener_bar_outbound_yaml = R"EOF( @@ -1641,7 +1641,7 @@ name: foo EXPECT_CALL(*listener_factory_.socket_, close()); EXPECT_CALL(*listener_foo, onDestroy()); manager_->stopListeners(ListenerManager::StopListenersType::All); - EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_stopped").value()); + EXPECT_EQ(1, server_.stats_store_.counterFromString("listener_manager.listener_stopped").value()); // Validate that adding a listener is not allowed after all listeners are stopped. 
const std::string listener_bar_yaml = R"EOF( @@ -1712,7 +1712,7 @@ traffic_direction: INBOUND EXPECT_CALL(*listener_factory_.socket_, close()); EXPECT_CALL(*listener_foo, onDestroy()); manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly); - EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_stopped").value()); + EXPECT_EQ(1, server_.stats_store_.counterFromString("listener_manager.listener_stopped").value()); } TEST_F(ListenerManagerImplTest, AddListenerFailure) { @@ -1747,7 +1747,9 @@ name: foo EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); - EXPECT_EQ(1UL, server_.stats_store_.counter("listener_manager.listener_create_failure").value()); + EXPECT_EQ( + 1UL, + server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); } TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { @@ -1761,9 +1763,9 @@ TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { )EOF"; manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); - manager_->listeners().front().get().listenerScope().counter("foo").inc(); + manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); - EXPECT_EQ(1UL, server_.stats_store_.counter("listener.[__1]_10000.foo").value()); + EXPECT_EQ(1UL, server_.stats_store_.counterFromString("listener.[__1]_10000.foo").value()); } TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { @@ -2964,8 +2966,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsFilterChainWithoutTlsInspector Network::ListenerConfig& listener = manager_->listeners().back().get(); Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr&) -> void {})); + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + 
Network::ListenerFilterPtr&) -> void {})); EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); } @@ -2996,8 +2999,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Network::ListenerConfig& listener = manager_->listeners().back().get(); Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr&) -> void {})); + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr&) -> void {})); EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); } @@ -3023,8 +3027,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SniFilterChainWithoutTlsInspector Network::ListenerConfig& listener = manager_->listeners().back().get(); Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr&) -> void {})); + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr&) -> void {})); EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); } @@ -3050,8 +3055,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AlpnFilterChainWithoutTlsInspecto Network::ListenerConfig& listener = manager_->listeners().back().get(); Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr&) -> void {})); + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr&) -> void {})); 
EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); } @@ -3077,7 +3083,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CustomTransportProtocolWithSniWit Network::ListenerConfig& listener = manager_->listeners().back().get(); Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; - EXPECT_CALL(manager, addAcceptFilter_(_)).Times(0); + EXPECT_CALL(manager, addAcceptFilter_(_, _)).Times(0); EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); } @@ -3269,15 +3275,29 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { routes: - match: { prefix: "/" } route: { cluster: service_foo } + listener_filters: + - name: "envoy.filters.listener.original_dst" + config: {} )EOF", Network::Address::IpVersion::v4); + Configuration::ListenerFactoryContext* listener_factory_context = nullptr; + // Extract listener_factory_context avoid accessing private member. + ON_CALL(listener_factory_, createListenerFilterFactoryList(_, _)) + .WillByDefault( + Invoke([&listener_factory_context]( + const Protobuf::RepeatedPtrField& + filters, + Configuration::ListenerFactoryContext& context) + -> std::vector { + listener_factory_context = &context; + return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, context); + })); manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); - auto context = dynamic_cast(&manager_->listeners().front().get()); - ASSERT_NE(nullptr, context); - EXPECT_EQ("test_value", - Config::Metadata::metadataValue(&context->listenerMetadata(), "com.bar.foo", "baz") - .string_value()); - EXPECT_EQ(envoy::config::core::v3::INBOUND, context->direction()); + ASSERT_NE(nullptr, listener_factory_context); + EXPECT_EQ("test_value", Config::Metadata::metadataValue( + &listener_factory_context->listenerMetadata(), "com.bar.foo", "baz") + .string_value()); + EXPECT_EQ(envoy::config::core::v3::INBOUND, 
listener_factory_context->direction()); } TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { @@ -3312,8 +3332,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { return socket; })); - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr& filter) -> void { + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) -> void { EXPECT_EQ(Network::FilterStatus::Continue, filter->onAccept(callbacks)); })); @@ -3332,10 +3353,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { public: // NamedListenerFilterConfigFactory Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Configuration::ListenerFactoryContext&) override { + createListenerFilterFactoryFromProto(const Protobuf::Message&, + const Network::ListenerFilterMatcherSharedPtr&, + Configuration::ListenerFactoryContext&) override { return [](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique()); + filter_manager.addAcceptFilter(nullptr, std::make_unique()); }; } @@ -3385,8 +3407,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { return socket; })); - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr& filter) -> void { + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) -> void { EXPECT_EQ(Network::FilterStatus::Continue, filter->onAccept(callbacks)); })); @@ -3408,10 +3431,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { public: // NamedListenerFilterConfigFactory Network::ListenerFilterFactoryCb - createFilterFactoryFromProto(const Protobuf::Message&, - Configuration::ListenerFactoryContext&) override { + 
createListenerFilterFactoryFromProto(const Protobuf::Message&, + const Network::ListenerFilterMatcherSharedPtr&, + Configuration::ListenerFactoryContext&) override { return [](Network::ListenerFilterManager& filter_manager) -> void { - filter_manager.addAcceptFilter(std::make_unique()); + filter_manager.addAcceptFilter(nullptr, std::make_unique()); }; } @@ -3461,8 +3485,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { return socket; })); - EXPECT_CALL(manager, addAcceptFilter_(_)) - .WillOnce(Invoke([&](Network::ListenerFilterPtr& filter) -> void { + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) -> void { EXPECT_EQ(Network::FilterStatus::Continue, filter->onAccept(callbacks)); })); @@ -3634,8 +3659,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AddressResolver) { NiceMock mock_resolver; EXPECT_CALL(mock_resolver, resolve(_)) - .WillOnce(Return(Network::Utility::parseInternetAddress("127.0.0.1", 1111, false))); - + .Times(2) + .WillRepeatedly(Return(Network::Utility::parseInternetAddress("127.0.0.1", 1111, false))); Registry::InjectFactory register_resolver(mock_resolver); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 772c90c606fa..2ab79f6f487b 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -1,3 +1,5 @@ +#include + #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" @@ -28,13 +30,18 @@ namespace Server { class ListenerHandle { public: - ListenerHandle() { EXPECT_CALL(*drain_manager_, startParentShutdownSequence()).Times(0); } + ListenerHandle(bool need_local_drain_manager = true) { + if (need_local_drain_manager) { + drain_manager_ = new MockDrainManager(); + 
EXPECT_CALL(*drain_manager_, startParentShutdownSequence()).Times(0); + } + } ~ListenerHandle() { onDestroy(); } MOCK_METHOD(void, onDestroy, ()); Init::ExpectableTargetImpl target_; - MockDrainManager* drain_manager_ = new MockDrainManager(); + MockDrainManager* drain_manager_{}; Configuration::FactoryContext* context_{}; }; @@ -79,8 +86,8 @@ class ListenerManagerImplTest : public testing::Test { return listener_tag_++; })); - local_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); - remote_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); + local_address_ = std::make_shared("127.0.0.1", 1234); + remote_address_ = std::make_shared("127.0.0.1", 1234); EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); EXPECT_CALL(os_sys_calls_, getsockname) .WillRepeatedly(Invoke([this](os_fd_t sockfd, sockaddr* addr, socklen_t* addrlen) { @@ -132,7 +139,7 @@ class ListenerManagerImplTest : public testing::Test { const std::vector& application_protocols, const std::string& source_address, uint16_t source_port) { if (absl::StartsWith(destination_address, "/")) { - local_address_.reset(new Network::Address::PipeInstance(destination_address)); + local_address_ = std::make_shared(destination_address); } else { local_address_ = Network::Utility::parseInternetAddress(destination_address, destination_port); @@ -146,7 +153,7 @@ class ListenerManagerImplTest : public testing::Test { .WillByDefault(ReturnRef(application_protocols)); if (absl::StartsWith(source_address, "/")) { - remote_address_.reset(new Network::Address::PipeInstance(source_address)); + remote_address_ = std::make_shared(source_address); } else { remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port); } diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index df8ab2910261..6e6219ec35a4 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -486,7 
+486,7 @@ TEST_F(OptionsImplPlatformLinuxTest, AffinityTest4) { class TestFactory : public Config::TypedFactory { public: - virtual ~TestFactory() = default; + ~TestFactory() override = default; std::string category() const override { return "test"; } std::string configType() override { return "google.protobuf.StringValue"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { @@ -501,7 +501,7 @@ class TestTestFactory : public TestFactory { class TestingFactory : public Config::TypedFactory { public: - virtual ~TestingFactory() = default; + ~TestingFactory() override = default; std::string category() const override { return "testing"; } std::string configType() override { return "google.protobuf.StringValue"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index acdaa001b885..ee801a6d97af 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -8,6 +8,7 @@ #include "extensions/resource_monitors/common/factory_base.h" +#include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/thread_local/mocks.h" @@ -55,6 +56,7 @@ class FakeResourceMonitor : public ResourceMonitor { Event::Dispatcher& dispatcher_; }; +template class FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitorFactory { public: FakeResourceMonitorFactory(const std::string& name) : monitor_(nullptr), name_(name) {} @@ -68,7 +70,7 @@ class FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitor } ProtobufTypes::MessagePtr createEmptyConfigProto() override { - return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + return ProtobufTypes::MessagePtr{new ConfigType()}; } std::string name() const override { return name_; } @@ -133,13 +135,13 @@ class OverloadManagerImplTest : public testing::Test { 
parseConfig(config), validation_visitor_, *api_); } - FakeResourceMonitorFactory factory1_; - FakeResourceMonitorFactory factory2_; + FakeResourceMonitorFactory factory1_; + FakeResourceMonitorFactory factory2_; Registry::InjectFactory register_factory1_; Registry::InjectFactory register_factory2_; NiceMock dispatcher_; NiceMock* timer_; // not owned - Stats::IsolatedStoreImpl stats_; + Stats::TestUtil::TestStore stats_; NiceMock thread_local_; Event::TimerCb timer_cb_; NiceMock validation_visitor_; diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 55c787d2266a..2771b5c3044f 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -41,7 +41,7 @@ namespace { TEST(ServerInstanceUtil, flushHelper) { InSequence s; - Stats::IsolatedStoreImpl store; + Stats::TestUtil::TestStore store; Stats::Counter& c = store.counter("hello"); c.inc(); store.gauge("world", Stats::Gauge::ImportMode::Accumulate).set(5); @@ -254,7 +254,7 @@ class ServerInstanceImplTest : public ServerInstanceImplTestBase, // Custom StatsSink that just increments a counter when flush is called. class CustomStatsSink : public Stats::Sink { public: - CustomStatsSink(Stats::Scope& scope) : stats_flushed_(scope.counter("stats.flushed")) {} + CustomStatsSink(Stats::Scope& scope) : stats_flushed_(scope.counterFromString("stats.flushed")) {} // Stats::Sink void flush(Stats::MetricSnapshot&) override { stats_flushed_.inc(); } @@ -321,9 +321,10 @@ TEST_P(ServerInstanceImplTest, EmptyShutdownLifecycleNotifications) { server_->dispatcher().post([&] { server_->shutdown(); }); server_thread->join(); // Validate that initialization_time histogram value has been set. 
- EXPECT_TRUE( - stats_store_.histogram("server.initialization_time_ms", Stats::Histogram::Unit::Milliseconds) - .used()); + EXPECT_TRUE(stats_store_ + .histogramFromString("server.initialization_time_ms", + Stats::Histogram::Unit::Milliseconds) + .used()); EXPECT_EQ(0L, TestUtility::findGauge(stats_store_, "server.state")->value()); } @@ -512,8 +513,8 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(ServerStatsTest, FlushStats) { initialize("test/server/test_data/server/empty_bootstrap.yaml"); - Stats::Gauge& recent_lookups = - stats_store_.gauge("server.stats_recent_lookups", Stats::Gauge::ImportMode::NeverImport); + Stats::Gauge& recent_lookups = stats_store_.gaugeFromString( + "server.stats_recent_lookups", Stats::Gauge::ImportMode::NeverImport); EXPECT_EQ(0, recent_lookups.value()); flushStats(); uint64_t strobed_recent_lookups = recent_lookups.value(); diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 22523d299340..87377b7022c7 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -251,8 +251,9 @@ std::string TestEnvironment::substitute(const std::string& str, const std::unordered_map path_map = { {"test_tmpdir", TestEnvironment::temporaryDirectory()}, {"test_udsdir", TestEnvironment::unixDomainSocketDirectory()}, - {"test_rundir", TestEnvironment::runfilesDirectory()}, + {"test_rundir", runfiles_ != nullptr ? 
TestEnvironment::runfilesDirectory() : "invalid"}, }; + std::string out_json_string = str; for (const auto& it : path_map) { const std::regex port_regex("\\{\\{ " + it.first + " \\}\\}"); diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 8765711f31c2..6d4d4fb09bad 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -191,24 +191,23 @@ const Network::FilterChainSharedPtr createEmptyFilterChainWithRawBufferSockets() namespace { struct SyncPacketProcessor : public Network::UdpPacketProcessor { - SyncPacketProcessor(Network::UdpRecvData& data) : data_(data) { ASSERT(data.buffer_ == nullptr); } + SyncPacketProcessor(std::list& data) : data_(data) {} void processPacket(Network::Address::InstanceConstSharedPtr local_address, Network::Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer, MonotonicTime receive_time) override { - data_.addresses_.local_ = std::move(local_address); - data_.addresses_.peer_ = std::move(peer_address); - data_.buffer_ = std::move(buffer); - data_.receive_time_ = receive_time; + Network::UdpRecvData datagram{ + {std::move(local_address), std::move(peer_address)}, std::move(buffer), receive_time}; + data_.push_back(std::move(datagram)); } uint64_t maxPacketSize() const override { return Network::MAX_UDP_PACKET_SIZE; } - Network::UdpRecvData& data_; + std::list& data_; }; } // namespace Api::IoCallUint64Result readFromSocket(IoHandle& handle, const Address::Instance& local_address, - UdpRecvData& data) { + std::list& data) { SyncPacketProcessor processor(data); return Network::Utility::readFromSocket(handle, local_address, processor, MonotonicTime(std::chrono::seconds(0)), nullptr); @@ -229,10 +228,13 @@ void UdpSyncPeer::write(const std::string& buffer, const Network::Address::Insta } void UdpSyncPeer::recv(Network::UdpRecvData& datagram) { - datagram = Network::UdpRecvData(); - const auto rc = - 
Network::Test::readFromSocket(socket_->ioHandle(), *socket_->localAddress(), datagram); - ASSERT_TRUE(rc.ok()); + if (received_datagrams_.empty()) { + const auto rc = Network::Test::readFromSocket(socket_->ioHandle(), *socket_->localAddress(), + received_datagrams_); + ASSERT_TRUE(rc.ok()); + } + datagram = std::move(received_datagrams_.front()); + received_datagrams_.pop_front(); } } // namespace Test diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index d3e155fb944c..4a7a7d05a4c4 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/network/address.h" @@ -186,6 +187,7 @@ class UdpSyncPeer { private: const Network::SocketPtr socket_; + std::list received_datagrams_; }; } // namespace Test diff --git a/test/test_common/registry.h b/test/test_common/registry.h index 85a3b7990026..6fce498b29f4 100644 --- a/test/test_common/registry.h +++ b/test/test_common/registry.h @@ -16,22 +16,22 @@ template class InjectFactory { public: InjectFactory(Base& instance) : instance_(instance) { EXPECT_STRNE(instance.category().c_str(), ""); - displaced_ = Registry::FactoryRegistry::replaceFactoryForTest(instance_); + + original_ = Registry::FactoryRegistry::getFactory(instance_.name()); + restore_factories_ = Registry::FactoryRegistry::replaceFactoryForTest(instance_); } ~InjectFactory() { - if (displaced_) { - auto injected = Registry::FactoryRegistry::replaceFactoryForTest(*displaced_); - EXPECT_EQ(injected, &instance_); - } else { - Registry::FactoryRegistry::removeFactoryForTest(instance_.name(), - instance_.configType()); - } + restore_factories_(); + + auto* restored = Registry::FactoryRegistry::getFactory(instance_.name()); + ASSERT(restored == original_); } private: Base& instance_; - Base* displaced_{}; + Base* original_{}; + std::function restore_factories_; }; } // namespace Registry diff --git a/test/test_common/utility.cc 
b/test/test_common/utility.cc index 6209840741b6..f7be0dee9409 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -97,22 +97,19 @@ bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instan // Check whether the two buffers contain the same content. It is valid for the content // to be arranged differently in the buffers. For example, lhs could have one slice // containing 10 bytes while rhs has ten slices containing one byte each. - uint64_t lhs_num_slices = lhs.getRawSlices(nullptr, 0); - uint64_t rhs_num_slices = rhs.getRawSlices(nullptr, 0); - absl::FixedArray lhs_slices(lhs_num_slices); - lhs.getRawSlices(lhs_slices.begin(), lhs_num_slices); - absl::FixedArray rhs_slices(rhs_num_slices); - rhs.getRawSlices(rhs_slices.begin(), rhs_num_slices); + Buffer::RawSliceVector lhs_slices = lhs.getRawSlices(); + Buffer::RawSliceVector rhs_slices = rhs.getRawSlices(); + size_t rhs_slice = 0; size_t rhs_offset = 0; - for (size_t lhs_slice = 0; lhs_slice < lhs_num_slices; lhs_slice++) { - for (size_t lhs_offset = 0; lhs_offset < lhs_slices[lhs_slice].len_; lhs_offset++) { + for (auto& lhs_slice : lhs_slices) { + for (size_t lhs_offset = 0; lhs_offset < lhs_slice.len_; lhs_offset++) { while (rhs_offset >= rhs_slices[rhs_slice].len_) { rhs_slice++; - ASSERT(rhs_slice < rhs_num_slices); + ASSERT(rhs_slice < rhs_slices.size()); rhs_offset = 0; } - auto lhs_str = static_cast(lhs_slices[lhs_slice].mem_); + auto lhs_str = static_cast(lhs_slice.mem_); auto rhs_str = static_cast(rhs_slices[rhs_slice].mem_); if (lhs_str[lhs_offset] != rhs_str[rhs_offset]) { return false; diff --git a/tools/BUILD b/tools/BUILD index dbf854b10495..fbd9abfc774b 100644 --- a/tools/BUILD +++ b/tools/BUILD @@ -30,7 +30,6 @@ py_library( srcs = [ "run_command.py", ], - srcs_version = "PY3", visibility = ["//visibility:public"], ) diff --git a/tools/api_boost/testdata/decl_ref_expr.cc.gold b/tools/api_boost/testdata/decl_ref_expr.cc.gold index 
c21cd57518ab..d7a337fc38b5 100644 --- a/tools/api_boost/testdata/decl_ref_expr.cc.gold +++ b/tools/api_boost/testdata/decl_ref_expr.cc.gold @@ -1,5 +1,5 @@ #include "envoy/api/v2/route/route_components.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/cluster/v4alpha/cluster.pb.h" #include "envoy/config/overload/v2alpha/overload.pb.h" #include "envoy/config/overload/v3/overload.pb.h" @@ -9,7 +9,7 @@ using envoy::config::overload::v3::Trigger; -using envoy::config::cluster::v3::Cluster; +using envoy::config::cluster::v4alpha::Cluster; using MutableStringClusterAccessor = std::string* (Cluster::*)(); class ThresholdTriggerImpl { @@ -35,9 +35,9 @@ public: envoy::config::overload::v3::ThresholdTrigger::default_instance(); ASSERT(envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold == Trigger::kThreshold); ASSERT(Foo::kThreshold == Trigger::kThreshold); - envoy::config::cluster::v3::Cluster::LbPolicy_Name(0); - static_cast(envoy::config::cluster::v3::Cluster::MAGLEV); - MutableStringClusterAccessor foo2 = &envoy::config::cluster::v3::Cluster::mutable_name; + envoy::config::cluster::v4alpha::Cluster::LbPolicy_Name(0); + static_cast(envoy::config::cluster::v4alpha::Cluster::MAGLEV); + MutableStringClusterAccessor foo2 = &envoy::config::cluster::v4alpha::Cluster::mutable_name; static_cast(foo2); } diff --git a/tools/api_boost/testdata/deprecate.cc.gold b/tools/api_boost/testdata/deprecate.cc.gold index cdae14e43c68..bee1dacfe56e 100644 --- a/tools/api_boost/testdata/deprecate.cc.gold +++ b/tools/api_boost/testdata/deprecate.cc.gold @@ -1,11 +1,11 @@ -#include "envoy/config/cluster/v3/cluster.pb.h" -#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/config/cluster/v4alpha/cluster.pb.h" +#include "envoy/config/route/v4alpha/route_components.pb.h" #include "envoy/type/matcher/v3/string.pb.h" void test() { - envoy::config::route::v3::VirtualHost vhost; + envoy::config::route::v4alpha::VirtualHost vhost; 
vhost.hidden_envoy_deprecated_per_filter_config(); vhost.mutable_hidden_envoy_deprecated_per_filter_config(); static_cast(envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); - static_cast(envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); + static_cast(envoy::config::cluster::v4alpha::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); } diff --git a/tools/api_boost/testdata/elaborated_type.cc.gold b/tools/api_boost/testdata/elaborated_type.cc.gold index 213e579f46e6..442426177598 100644 --- a/tools/api_boost/testdata/elaborated_type.cc.gold +++ b/tools/api_boost/testdata/elaborated_type.cc.gold @@ -1,10 +1,10 @@ -#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/cluster/v4alpha/cluster.pb.h" #include "envoy/config/overload/v3/overload.pb.h" class ThresholdTriggerImpl { public: ThresholdTriggerImpl(const envoy::config::overload::v3::ThresholdTrigger& /*config*/) {} - void someMethod(envoy::config::cluster::v3::Cluster::LbPolicy) {} + void someMethod(envoy::config::cluster::v4alpha::Cluster::LbPolicy) {} const envoy::config::overload::v3::Trigger::TriggerOneofCase case_{}; }; diff --git a/tools/api_boost/testdata/rename.cc.gold b/tools/api_boost/testdata/rename.cc.gold index 98a823fb1826..124a528b05fd 100644 --- a/tools/api_boost/testdata/rename.cc.gold +++ b/tools/api_boost/testdata/rename.cc.gold @@ -1,7 +1,7 @@ -#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/config/route/v4alpha/route_components.pb.h" void test() { - envoy::config::route::v3::RouteAction route_action; + envoy::config::route::v4alpha::RouteAction route_action; route_action.host_rewrite_literal(); route_action.set_host_rewrite_literal("blah"); } diff --git a/tools/api_boost/testdata/validate.cc.gold b/tools/api_boost/testdata/validate.cc.gold index 6479cc251a32..f358c7aec715 100644 --- a/tools/api_boost/testdata/validate.cc.gold +++ b/tools/api_boost/testdata/validate.cc.gold @@ -1,10 
+1,10 @@ -#include "envoy/config/cluster/v3/cluster.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.validate.h" +#include "envoy/config/cluster/v4alpha/cluster.pb.h" +#include "envoy/config/cluster/v4alpha/cluster.pb.validate.h" #include "envoy/protobuf/message_validator.h" #include "common/protobuf/utility.h" void foo(Envoy::ProtobufMessage::ValidationVisitor& validator) { - envoy::config::cluster::v3::Cluster msg; - Envoy::MessageUtil::downcastAndValidate(msg, validator); + envoy::config::cluster::v4alpha::Cluster msg; + Envoy::MessageUtil::downcastAndValidate(msg, validator); } diff --git a/tools/api_proto_plugin/BUILD b/tools/api_proto_plugin/BUILD index 4eb5ddaa7832..5c6c535a4a88 100644 --- a/tools/api_proto_plugin/BUILD +++ b/tools/api_proto_plugin/BUILD @@ -1,5 +1,6 @@ licenses(["notice"]) # Apache 2 +load("@bazel_skylib//rules:common_settings.bzl", "string_flag") load("@rules_python//python:defs.bzl", "py_library") load("//tools/type_whisperer:type_database.bzl", "type_database") @@ -12,7 +13,6 @@ py_library( "type_context.py", "visitor.py", ], - srcs_version = "PY3", visibility = ["//visibility:public"], deps = [ "@com_google_protobuf//:protobuf_python", @@ -22,7 +22,6 @@ py_library( py_library( name = "utils", srcs = ["utils.py"], - srcs_version = "PY3", visibility = ["//visibility:public"], ) @@ -38,3 +37,9 @@ type_database( targets = [":default_type_db_target"], visibility = ["//visibility:public"], ) + +string_flag( + name = "extra_args", + build_setting_default = "", + visibility = ["//visibility:public"], +) diff --git a/tools/api_proto_plugin/plugin.bzl b/tools/api_proto_plugin/plugin.bzl index 5bc0ef7f7112..95568e47123a 100644 --- a/tools/api_proto_plugin/plugin.bzl +++ b/tools/api_proto_plugin/plugin.bzl @@ -1,3 +1,4 @@ +load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo") load("@rules_proto//proto:defs.bzl", "ProtoInfo") # Borrowed from https://github.com/grpc/grpc-java/blob/v1.24.1/java_grpc_library.bzl#L61 @@ -56,6 +57,8 @@ 
def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes): if len(ctx.attr._type_db.files.to_list()) != 1: fail("{} must have one type database file".format(ctx.attr._type_db)) args += ["--api_proto_plugin_opt=type_db_path=" + ctx.attr._type_db.files.to_list()[0].path] + if hasattr(ctx.attr, "_extra_args"): + args += ["--api_proto_plugin_opt=extra_args=" + ctx.attr._extra_args[BuildSettingInfo].value] args += [src.path for src in target[ProtoInfo].direct_sources] env = {} @@ -89,6 +92,9 @@ def api_proto_plugin_aspect(tool_label, aspect_impl, use_type_db = False): _attrs["_type_db"] = attr.label( default = Label("@envoy//tools/api_proto_plugin:default_type_db"), ) + _attrs["_extra_args"] = attr.label( + default = Label("@envoy//tools/api_proto_plugin:extra_args"), + ) return aspect( attr_aspects = ["deps"], attrs = _attrs, diff --git a/tools/api_proto_plugin/plugin.py b/tools/api_proto_plugin/plugin.py index 31ac2c0dbb94..684bef38dee6 100644 --- a/tools/api_proto_plugin/plugin.py +++ b/tools/api_proto_plugin/plugin.py @@ -23,14 +23,18 @@ # FileDescriptorProto transformer; this is applied to the input # before any output generation. 'xform', + # Supply --//tools/api_proto_plugin CLI args as a parameters dictionary + # to visitor_factory constructor and xform function? + 'want_params', ]) -def DirectOutputDescriptor(output_suffix, visitor): - return OutputDescriptor(output_suffix, visitor, lambda x: x) +def DirectOutputDescriptor(output_suffix, visitor, want_params=False): + return OutputDescriptor(output_suffix, visitor, (lambda x, _: x) if want_params else lambda x: x, + want_params) -def Plugin(output_descriptors, parameter_callback=None): +def Plugin(output_descriptors): """Protoc plugin entry point. This defines protoc plugin and manages the stdin -> stdout flow. 
An @@ -48,9 +52,6 @@ def Plugin(output_descriptors, parameter_callback=None): response = plugin_pb2.CodeGeneratorResponse() cprofile_enabled = os.getenv('CPROFILE_ENABLED') - if request.HasField("parameter") and parameter_callback: - parameter_callback(request.parameter) - # We use request.file_to_generate rather than request.file_proto here since we # are invoked inside a Bazel aspect, each node in the DAG will be visited once # by the aspect and we only want to generate docs for the current node. @@ -63,9 +64,17 @@ def Plugin(output_descriptors, parameter_callback=None): for od in output_descriptors: f = response.file.add() f.name = file_proto.name + od.output_suffix - xformed_proto = od.xform(file_proto) - f.content = traverse.TraverseFile(xformed_proto, - od.visitor_factory()) if xformed_proto else '' + # Don't run API proto plugins on things like WKT types etc. + if not file_proto.package.startswith('envoy.'): + continue + if request.HasField("parameter") and od.want_params: + params = dict(param.split('=') for param in request.parameter.split(',')) + xformed_proto = od.xform(file_proto, params) + visitor_factory = od.visitor_factory(params) + else: + xformed_proto = od.xform(file_proto) + visitor_factory = od.visitor_factory() + f.content = traverse.TraverseFile(xformed_proto, visitor_factory) if xformed_proto else '' if cprofile_enabled: pr.disable() stats_stream = io.StringIO() diff --git a/tools/api_proto_plugin/traverse.py b/tools/api_proto_plugin/traverse.py index 7d7490537904..2718e678fc32 100644 --- a/tools/api_proto_plugin/traverse.py +++ b/tools/api_proto_plugin/traverse.py @@ -50,12 +50,14 @@ def TraverseMessage(type_context, msg_proto, visitor): if nested_msg.options.map_entry } nested_msgs = [ - TraverseMessage(type_context.ExtendNestedMessage(index, nested_msg.name), nested_msg, visitor) - for index, nested_msg in enumerate(msg_proto.nested_type) + TraverseMessage( + type_context.ExtendNestedMessage(index, nested_msg.name, 
nested_msg.options.deprecated), + nested_msg, visitor) for index, nested_msg in enumerate(msg_proto.nested_type) ] nested_enums = [ - TraverseEnum(type_context.ExtendNestedEnum(index, nested_enum.name), nested_enum, visitor) - for index, nested_enum in enumerate(msg_proto.enum_type) + TraverseEnum( + type_context.ExtendNestedEnum(index, nested_enum.name, nested_enum.options.deprecated), + nested_enum, visitor) for index, nested_enum in enumerate(msg_proto.enum_type) ] return visitor.VisitMessage(msg_proto, type_context, nested_msgs, nested_enums) @@ -77,11 +79,11 @@ def TraverseFile(file_proto, visitor): for index, service in enumerate(file_proto.service) ] msgs = [ - TraverseMessage(package_type_context.ExtendMessage(index, msg.name), msg, visitor) - for index, msg in enumerate(file_proto.message_type) + TraverseMessage(package_type_context.ExtendMessage(index, msg.name, msg.options.deprecated), + msg, visitor) for index, msg in enumerate(file_proto.message_type) ] enums = [ - TraverseEnum(package_type_context.ExtendEnum(index, enum.name), enum, visitor) - for index, enum in enumerate(file_proto.enum_type) + TraverseEnum(package_type_context.ExtendEnum(index, enum.name, enum.options.deprecated), enum, + visitor) for index, enum in enumerate(file_proto.enum_type) ] return visitor.VisitFile(file_proto, package_type_context, services, msgs, enums) diff --git a/tools/api_proto_plugin/type_context.py b/tools/api_proto_plugin/type_context.py index 5ba6bb06a6b0..a8ba0bc17fd9 100644 --- a/tools/api_proto_plugin/type_context.py +++ b/tools/api_proto_plugin/type_context.py @@ -152,8 +152,9 @@ def __init__(self, source_code_info, name): # Map from a message's oneof index to the "required" bool property. 
self.oneof_required = {} self.type_name = 'file' + self.deprecated = False - def _Extend(self, path, type_name, name): + def _Extend(self, path, type_name, name, deprecated=False): if not self.name: extended_name = name else: @@ -165,25 +166,28 @@ def _Extend(self, path, type_name, name): extended.oneof_fields = self.oneof_fields.copy() extended.oneof_names = self.oneof_names.copy() extended.oneof_required = self.oneof_required.copy() + extended.deprecated = self.deprecated or deprecated return extended - def ExtendMessage(self, index, name): + def ExtendMessage(self, index, name, deprecated): """Extend type context with a message. Args: index: message index in file. name: message name. + deprecated: is the message depreacted? """ - return self._Extend([4, index], 'message', name) + return self._Extend([4, index], 'message', name, deprecated) - def ExtendNestedMessage(self, index, name): + def ExtendNestedMessage(self, index, name, deprecated): """Extend type context with a nested message. Args: index: nested message index in message. name: message name. + deprecated: is the message depreacted? """ - return self._Extend([3, index], 'message', name) + return self._Extend([3, index], 'message', name, deprecated) def ExtendField(self, index, name): """Extend type context with a field. @@ -194,14 +198,15 @@ def ExtendField(self, index, name): """ return self._Extend([2, index], 'field', name) - def ExtendEnum(self, index, name): + def ExtendEnum(self, index, name, deprecated): """Extend type context with an enum. Args: index: enum index in file. name: enum name. + deprecated: is the message depreacted? """ - return self._Extend([5, index], 'enum', name) + return self._Extend([5, index], 'enum', name, deprecated) def ExtendService(self, index, name): """Extend type context with a service. 
@@ -212,14 +217,15 @@ def ExtendService(self, index, name): """ return self._Extend([6, index], 'service', name) - def ExtendNestedEnum(self, index, name): + def ExtendNestedEnum(self, index, name, deprecated): """Extend type context with a nested enum. Args: index: enum index in message. name: enum name. + deprecated: is the message depreacted? """ - return self._Extend([4, index], 'enum', name) + return self._Extend([4, index], 'enum', name, deprecated) def ExtendEnumValue(self, index, name): """Extend type context with an enum enum. diff --git a/tools/api_proto_plugin/utils.py b/tools/api_proto_plugin/utils.py index a8a9f2c14ae8..a2ac62f03949 100644 --- a/tools/api_proto_plugin/utils.py +++ b/tools/api_proto_plugin/utils.py @@ -1,4 +1,3 @@ -import glob import os @@ -27,15 +26,6 @@ def BazelBinPathForOutputArtifact(label, suffix, root=''): Returns: Path in bazel-bin/external/envoy_api_canonical for label output with given suffix. """ - # We use ** glob matching here to deal with the fact that we have something - # like - # bazel-bin/external/envoy_api/envoy/admin/v2alpha/pkg/envoy/admin/v2alpha/certs.proto.proto - # and we don't want to have to do a nested loop and slow bazel query to - # recover the canonical package part of the path. - # While we may have reformatted the file multiple times due to the transitive - # dependencies in the aspect above, they all look the same. So, just pick an - # arbitrary match and we're done. 
- glob_pattern = os.path.join( - root, 'bazel-bin/external/envoy_api_canonical/**/%s%s' % - (ProtoFileCanonicalFromLabel(label), suffix)) - return glob.glob(glob_pattern, recursive=True)[0] + proto_file_path = ProtoFileCanonicalFromLabel(label) + return os.path.join(root, 'bazel-bin/external/envoy_api_canonical', + os.path.dirname(proto_file_path), 'pkg', proto_file_path + suffix) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 48bad810ba41..f8cdf5c9356e 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -404,13 +404,15 @@ def hasInvalidAngleBracketDirectory(line): return subdir in SUBDIR_SET -VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* [a-z \-_]*: [a-z:`]") +VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* ([a-z \-_]*): ([a-z:`]+)") VERSION_HISTORY_NEW_RELEASE_REGEX = re.compile("^====[=]+$") def checkCurrentReleaseNotes(file_path, error_messages): in_current_release = False + first_word_of_prior_line = '' + next_word_to_check = '' # first word after : for line_number, line in enumerate(readLines(file_path)): def reportError(message): @@ -423,9 +425,26 @@ def reportError(message): # If we see a version marker we are now in the section for the current release. in_current_release = True - if line.startswith("*") and not VERSION_HISTORY_NEW_LINE_REGEX.match(line): - reportError("Version history line malformed. " - "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line) + # Do basic alphabetization checks of the first word on the line and the + # first word after the : + if line.startswith("*"): + match = VERSION_HISTORY_NEW_LINE_REGEX.match(line) + if not match: + reportError("Version history line malformed. 
" + "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line) + else: + first_word = match.groups()[0] + next_word = match.groups()[1] + if first_word_of_prior_line and first_word_of_prior_line > first_word: + reportError( + "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " + % (first_word_of_prior_line, first_word, line)) + if first_word_of_prior_line == first_word and next_word_to_check and next_word_to_check > next_word: + reportError( + "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. " + % (next_word_to_check, next_word, line)) + first_word_of_prior_line = first_word + next_word_to_check = next_word def checkFileContents(file_path, checker): @@ -603,8 +622,9 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \ - ('.counter(' in line or '.gauge(' in line or '.histogram(' in line or \ - '->counter(' in line or '->gauge(' in line or '->histogram(' in line): + ('.counterFromString(' in line or '.gaugeFromString(' in line or \ + '.histogramFromString(' in line or '->counterFromString(' in line or \ + '->gaugeFromString(' in line or '->histogramFromString(' in line): reportError("Don't lookup stats by name at runtime; use StatName saved during construction") if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line): diff --git a/tools/code_format/format_python_tools.sh b/tools/code_format/format_python_tools.sh index 56587489a73a..ac755e3dff52 100755 --- a/tools/code_format/format_python_tools.sh +++ b/tools/code_format/format_python_tools.sh @@ -1,5 +1,9 @@ #!/bin/bash +"$(dirname "$0")"/../git/modified_since_last_github_commit.sh ./ py || \ + [[ "${FORCE_PYTHON_FORMAT}" == "yes" ]] || \ + { echo "Skipping format_python_tools.sh due to no Python changes"; exit 0; } + . 
tools/shell_utils.sh set -e diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index d07340b84808..b5b3c5a4a1be 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -10,19 +10,25 @@ def runBazelBuildForCompilationDatabase(bazel_options, bazel_targets): - query = 'attr(include_prefix, ".+", kind(cc_library, deps({})))'.format( - ' union '.join(bazel_targets)) - build_targets = subprocess.check_output(["bazel", "query", query]).decode().splitlines() + query_targets = ' union '.join(bazel_targets) + query = ' union '.join( + q.format(query_targets) for q in [ + 'attr(include_prefix, ".+", kind(cc_library, deps({})))', + 'attr(strip_include_prefix, ".+", kind(cc_library, deps({})))', + 'attr(generator_function, ".*proto_library", kind(cc_.*, deps({})))', + ]) + build_targets = subprocess.check_output(["bazel", "query", "--notool_deps", + query]).decode().splitlines() subprocess.check_call(["bazel", "build"] + bazel_options + build_targets) # This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh def generateCompilationDatabase(args): - # We need to download all remote outputs for generated source code, we don't care about built - # binaries so just always strip and use dynamic link to minimize download size. + # We need to download all remote outputs for generated source code. This option lives here to override those + # specified in bazelrc. 
bazel_options = shlex.split(os.environ.get("BAZEL_BUILD_OPTIONS", "")) + [ - "-c", "fastbuild", "--build_tag_filters=-manual", - "--experimental_remote_download_outputs=all", "--strip=always" + "--config=compdb", + "--remote_download_outputs=all", ] if args.run_bazel_build: runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets) diff --git a/tools/git/last_github_commit.sh b/tools/git/last_github_commit.sh new file mode 100755 index 000000000000..9746d259ac3b --- /dev/null +++ b/tools/git/last_github_commit.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# Looking back from HEAD, find the first commit that was merged onto master by GitHub. This is +# likely the last non-local change on a given branch. There may be some exceptions for this +# heuristic, e.g. when patches are manually merged for security fixes on master, but this is very +# rare. + +git rev-list --no-merges --committer="GitHub " --max-count=1 HEAD diff --git a/tools/git/modified_since_last_github_commit.sh b/tools/git/modified_since_last_github_commit.sh new file mode 100755 index 000000000000..bbb9d388a239 --- /dev/null +++ b/tools/git/modified_since_last_github_commit.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +declare -r BASE="$(dirname "$0")" +declare -r TARGET_PATH=$1 +declare -r EXTENSION=$2 + +git diff --name-only $("${BASE}"/last_github_commit.sh)..HEAD | grep "\.${EXTENSION}$" diff --git a/tools/proto_format/active_protos_gen.py b/tools/proto_format/active_protos_gen.py new file mode 100755 index 000000000000..37b871d93f2e --- /dev/null +++ b/tools/proto_format/active_protos_gen.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 + +# Generate ./api/versioning/BUILD based on packages with files containing +# "package_version_status = ACTIVE." + +import os +import string +import subprocess +import sys + +BUILD_FILE_TEMPLATE = string.Template( + """# DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. 
+ +licenses(["notice"]) # Apache 2 + +load("@rules_proto//proto:defs.bzl", "proto_library") + +# This tracks active development versions of protos. +proto_library( + name = "active_protos", + visibility = ["//visibility:public"], + deps = [ +$active_pkgs ], +) + +# This tracks frozen versions of protos. +proto_library( + name = "frozen_protos", + visibility = ["//visibility:public"], + deps = [ +$frozen_pkgs ], +) +""") + + +# Key sort function to achieve consistent results with buildifier. +def BuildOrderKey(key): + return key.replace(':', '!') + + +def DepsFormat(pkgs): + if not pkgs: + return '' + return '\n'.join( + ' "//%s:pkg",' % p.replace('.', '/') for p in sorted(pkgs, key=BuildOrderKey)) + '\n' + + +# Find packages with a given package version status in a given API tree root. +def FindPkgs(package_version_status, api_root): + try: + active_files = subprocess.check_output( + ['grep', '-l', '-r', + 'package_version_status = %s;' % package_version_status, + api_root]).decode().strip().split('\n') + api_protos = [f for f in active_files if f.endswith('.proto')] + except subprocess.CalledProcessError: + api_protos = [] + return set([os.path.dirname(p)[len(api_root) + 1:] for p in api_protos]) + + +if __name__ == '__main__': + api_root = sys.argv[1] + active_pkgs = FindPkgs('ACTIVE', api_root) + frozen_pkgs = FindPkgs('FROZEN', api_root) + sys.stdout.write( + BUILD_FILE_TEMPLATE.substitute(active_pkgs=DepsFormat(active_pkgs), + frozen_pkgs=DepsFormat(frozen_pkgs))) diff --git a/tools/proto_format/proto_format.sh b/tools/proto_format/proto_format.sh index 2f29c8ba650a..2dfcb1e37840 100755 --- a/tools/proto_format/proto_format.sh +++ b/tools/proto_format/proto_format.sh @@ -3,34 +3,68 @@ # Reformat API protos to canonical proto style using protoxform. 
set -e +set -x -[[ "$1" == "check" || "$1" == "fix" ]] || (echo "Usage: $0 "; exit 1) +[[ "$1" == "check" || "$1" == "fix" || "$1" == "freeze" ]] || \ + (echo "Usage: $0 "; exit 1) -# Clean up any stale files in the API tree output. Bazel remembers valid cached -# files still. -# rm -rf bazel-bin/external/envoy_api +# Developers working on protoxform and other proto format tooling changes will need to override the +# following check by setting FORCE_PROTO_FORMAT=yes in the environment. +./tools/git/modified_since_last_github_commit.sh ./api/envoy proto || \ + [[ "${FORCE_PROTO_FORMAT}" == "yes" ]] || \ + { echo "Skipping proto_format.sh due to no API change"; exit 0; } -# Find all source protos. -declare -r PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//docs:protos))") +if [[ "$2" == "--test" ]] +then + echo "protoxform_test..." + ./tools/protoxform/protoxform_test.sh + bazel test ${BAZEL_BUILD_OPTIONS} //tools/protoxform:merge_active_shadow_test +fi + +# Generate //versioning:active_protos. +./tools/proto_format/active_protos_gen.py ./api > ./api/versioning/BUILD # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all" -# TODO(htuch): This script started life by cloning docs/build.sh. It depends on -# the @envoy_api_canonical//docs:protos target in a few places as a result. This is not -# guaranteed to be the precise set of protos we want to format, but as a -# starting place it seems reasonable. In the future, we should change the logic -# here. 
-bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//docs:protos \ - @envoy_api_canonical//docs:protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto \ - --action_env=CPROFILE_ENABLED=1 --host_force_python=PY3 +# If the specified command is 'freeze', we tell protoxform to adjust package version status to +# reflect a major version freeze and then do a regular 'fix'. +PROTO_SYNC_CMD="$1" +if [[ "$1" == "freeze" ]] +then + declare -r FREEZE_ARG="--//tools/api_proto_plugin:extra_args=freeze" + PROTO_SYNC_CMD="fix" +fi +# Invoke protoxform aspect. +bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//versioning:active_protos ${FREEZE_ARG} \ + @envoy_api_canonical//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto + +# Find all source protos. +declare -r ACTIVE_PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:active_protos))") +declare -r FROZEN_PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:frozen_protos))") + +# Setup for proto_sync.py. TOOLS=$(dirname $(dirname $(realpath $0))) -# to satisfy dependency on api_proto_plugin +# To satisfy dependency on api_proto_plugin. export PYTHONPATH="$TOOLS" -./tools/proto_format/proto_sync.py "--mode=$1" ${PROTO_TARGETS} +# Build protoprint and merge_active_shadow_tools for use in proto_sync.py. +bazel build ${BAZEL_BUILD_OPTIONS} //tools/protoxform:protoprint //tools/protoxform:merge_active_shadow + +# Copy back the FileDescriptorProtos that protoxform emittted to the source tree. This involves +# pretty-printing to format with protoprint and potentially merging active/shadow versions of protos +# with merge_active_shadow. 
+./tools/proto_format/proto_sync.py "--mode=${PROTO_SYNC_CMD}" ${ACTIVE_PROTO_TARGETS} ${FROZEN_PROTO_TARGETS} + +# Need to regenerate //versioning:active_protos before building type DB below if freezing. +if [[ "$1" == "freeze" ]] +then + ./tools/proto_format/active_protos_gen.py ./api > ./api/versioning/BUILD +fi -bazel build ${BAZEL_BUILD_OPTIONS} //tools/type_whisperer:api_build_file +# Generate api/BUILD file based on updated type database. +bazel build ${BAZEL_BUILD_OPTIONS} //tools/type_whisperer:api_build_file cp -f bazel-bin/tools/type_whisperer/BUILD.api_build_file api/BUILD -cp -f ./api/bazel/*.bzl ./api/bazel/BUILD ./generated_api_shadow/bazel +# Misc. manual copies to keep generated_api_shadow/ in sync with api/. +cp -f ./api/bazel/*.bzl ./api/bazel/BUILD ./generated_api_shadow/bazel diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index 748bfb25f8e5..678480795c31 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -1,8 +1,16 @@ #!/usr/bin/env python3 -# Diff or copy protoxform artifacts from Bazel cache back to the source tree. +# 1. Take protoxform artifacts from Bazel cache and pretty-print with protoprint.py. +# 2. In the case where we are generating an Envoy internal shadow, it may be +# necessary to combine the current active proto, subject to hand editing, with +# shadow artifacts from the previous verion; this is done via +# merge_active_shadow.py. +# 3. Diff or copy resulting artifacts to the source tree. 
import argparse +from collections import defaultdict +import functools +import multiprocessing as mp import os import pathlib import re @@ -46,7 +54,7 @@ IMPORT_REGEX = re.compile('import "(.*)";') SERVICE_REGEX = re.compile('service \w+ {') -PACKAGE_REGEX = re.compile('\npackage ([^="]*);') +PACKAGE_REGEX = re.compile('\npackage: "([^"]*)"') PREVIOUS_MESSAGE_TYPE_REGEX = re.compile(r'previous_message_type\s+=\s+"([^"]*)";') @@ -87,20 +95,89 @@ def GetDestinationPath(src): matches[0])).joinpath(src_path.name.split('.')[0] + ".proto") -def SyncProtoFile(cmd, src, dst_root): - """Diff or in-place update a single proto file from protoxform.py Bazel cache artifacts." +def GetAbsDestinationPath(dst_root, src): + """Obtain absolute path from a proto file path combined with destination root. + + Creates the parent directory if necessary. Args: - cmd: 'check' or 'fix'. + dst_root: destination root path. src: source path. """ - # Skip empty files, this indicates this file isn't modified in this version. - if os.stat(src).st_size == 0: - return [] rel_dst_path = GetDestinationPath(src) dst = dst_root.joinpath(rel_dst_path) - dst.parent.mkdir(0o755, True, True) - shutil.copyfile(src, str(dst)) + dst.parent.mkdir(0o755, parents=True, exist_ok=True) + return dst + + +def ProtoPrint(src, dst): + """Pretty-print FileDescriptorProto to a destination file. + + Args: + src: source path for FileDescriptorProto. + dst: destination path for formatted proto. + """ + print('ProtoPrint %s' % dst) + subprocess.check_output([ + 'bazel-bin/tools/protoxform/protoprint', src, + str(dst), + './bazel-bin/tools/protoxform/protoprint.runfiles/envoy/tools/type_whisperer/api_type_db.pb_text' + ]) + + +def MergeActiveShadow(active_src, shadow_src, dst): + """Merge active/shadow FileDescriptorProto to a destination file. + + Args: + active_src: source path for active FileDescriptorProto. + shadow_src: source path for shadow FileDescriptorProto. + dst: destination path for FileDescriptorProto. 
+ """ + print('MergeActiveShadow %s' % dst) + subprocess.check_output([ + 'bazel-bin/tools/protoxform/merge_active_shadow', + active_src, + shadow_src, + dst, + ]) + + +def SyncProtoFile(dst_srcs): + """Pretty-print a proto descriptor from protoxform.py Bazel cache artifacts." + + In the case where we are generating an Envoy internal shadow, it may be + necessary to combine the current active proto, subject to hand editing, with + shadow artifacts from the previous verion; this is done via + MergeActiveShadow(). + + Args: + dst_srcs: destination/sources path tuple. + """ + dst, srcs = dst_srcs + assert (len(srcs) > 0) + # If we only have one candidate source for a destination, just pretty-print. + if len(srcs) == 1: + src = srcs[0] + ProtoPrint(src, dst) + else: + # We should only see an active and next major version candidate from + # previous version today. + assert (len(srcs) == 2) + shadow_srcs = [ + s for s in srcs if s.endswith('.next_major_version_candidate.envoy_internal.proto') + ] + active_src = [s for s in srcs if s.endswith('active_or_frozen.proto')][0] + # If we're building the shadow, we need to combine the next major version + # candidate shadow with the potentially hand edited active version. 
+ if len(shadow_srcs) > 0: + assert (len(shadow_srcs) == 1) + with tempfile.NamedTemporaryFile() as f: + MergeActiveShadow(active_src, shadow_srcs[0], f.name) + ProtoPrint(f.name, dst) + else: + ProtoPrint(active_src, dst) + src = active_src + rel_dst_path = GetDestinationPath(src) return ['//%s:pkg' % str(rel_dst_path.parent)] @@ -247,17 +324,26 @@ def GenerateCurrentApiDir(api_dir, dst_dir): shutil.rmtree(str(dst.joinpath("service", "auth", "v2alpha"))) +def GitStatus(path): + return subprocess.check_output(['git', 'status', '--porcelain', str(path)]).decode() + + def Sync(api_root, mode, labels, shadow): - pkg_deps = [] with tempfile.TemporaryDirectory() as tmp: dst_dir = pathlib.Path(tmp).joinpath("b") + paths = [] for label in labels: - pkg_deps += SyncProtoFile(mode, utils.BazelBinPathForOutputArtifact(label, '.v2.proto'), - dst_dir) - pkg_deps += SyncProtoFile( - mode, + paths.append(utils.BazelBinPathForOutputArtifact(label, '.active_or_frozen.proto')) + paths.append( utils.BazelBinPathForOutputArtifact( - label, '.v3.envoy_internal.proto' if shadow else '.v3.proto'), dst_dir) + label, '.next_major_version_candidate.envoy_internal.proto' + if shadow else '.next_major_version_candidate.proto')) + dst_src_paths = defaultdict(list) + for path in paths: + if os.stat(path).st_size > 0: + dst_src_paths[GetAbsDestinationPath(dst_dir, path)].append(path) + with mp.Pool() as p: + pkg_deps = p.map(SyncProtoFile, dst_src_paths.items()) SyncBuildFiles(mode, dst_dir) current_api_dir = pathlib.Path(tmp).joinpath("a") @@ -282,6 +368,14 @@ def Sync(api_root, mode, labels, shadow): print(diff.decode(), file=sys.stderr) sys.exit(1) if mode == "fix": + git_status = GitStatus(api_root) + if git_status: + print('git status indicates a dirty API tree:\n%s' % git_status) + print( + 'Proto formatting may overwrite or delete files in the above list with no git backup.' + ) + if input('Continue? 
[yN] ').strip().lower() != 'y': + sys.exit(1) src_files = set(str(p.relative_to(current_api_dir)) for p in current_api_dir.rglob('*')) dst_files = set(str(p.relative_to(dst_dir)) for p in dst_dir.rglob('*')) deleted_files = src_files.difference(dst_files) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index a3aa5d5987b1..812ceac0c66b 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -3,7 +3,6 @@ licenses(["notice"]) # Apache 2 py_binary( name = "generate_empty", srcs = ["generate_empty.py"], - python_version = "PY3", visibility = ["//visibility:public"], deps = [":protodoc"], ) @@ -11,7 +10,6 @@ py_binary( py_binary( name = "protodoc", srcs = ["protodoc.py"], - python_version = "PY3", visibility = ["//visibility:public"], deps = [ "//tools/api_proto_plugin", diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD index 268579f06be6..d5d46bce81ee 100644 --- a/tools/protoxform/BUILD +++ b/tools/protoxform/BUILD @@ -1,5 +1,26 @@ licenses(["notice"]) # Apache 2 +py_binary( + name = "merge_active_shadow", + srcs = ["merge_active_shadow.py"], + deps = [ + "@com_envoyproxy_protoc_gen_validate//validate:validate_py", + "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", + "@com_google_googleapis//google/api:annotations_py_proto", + "@com_google_protobuf//:protobuf_python", + "@envoy_api_canonical//envoy/annotations:pkg_py_proto", + ], +) + +py_test( + name = "merge_active_shadow_test", + srcs = ["merge_active_shadow_test.py"], + deps = [ + ":merge_active_shadow", + "@com_google_protobuf//:protobuf_python", + ], +) + py_binary( name = "protoxform", srcs = [ @@ -8,11 +29,30 @@ py_binary( "protoxform.py", "utils.py", ], - data = ["//:.clang-format"], - python_version = "PY3", visibility = ["//visibility:public"], deps = [ "//tools/api_proto_plugin", + "//tools/type_whisperer:api_type_db_proto_py_proto", + "@com_envoyproxy_protoc_gen_validate//validate:validate_py", + "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", + 
"@com_google_googleapis//google/api:annotations_py_proto", + "@envoy_api_canonical//envoy/annotations:pkg_py_proto", + ], +) + +py_binary( + name = "protoprint", + srcs = [ + "options.py", + "protoprint.py", + "utils.py", + ], + data = [ + "//:.clang-format", + "//tools/type_whisperer:api_type_db.pb_text", + ], + visibility = ["//visibility:public"], + deps = [ "//tools/type_whisperer", "//tools/type_whisperer:api_type_db_proto_py_proto", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", diff --git a/tools/protoxform/merge_active_shadow.py b/tools/protoxform/merge_active_shadow.py new file mode 100644 index 000000000000..ecd3cf31c123 --- /dev/null +++ b/tools/protoxform/merge_active_shadow.py @@ -0,0 +1,142 @@ +# Merge active and previous version's generated next major version candidate +# shadow. This involves simultaneously traversing both FileDescriptorProtos and: +# 1. Recovering hidden_envoy_deprecated_* fields and enum values in active proto. +# 2. Recovering deprecated (sub)message types. +# 3. Misc. fixups for oneof metadata and reserved ranges/names. + +import copy +import pathlib +import sys + +from google.protobuf import descriptor_pb2 +from google.protobuf import text_format + +# Note: we have to include those proto definitions for text_format sanity. +from google.api import annotations_pb2 as _ +from validate import validate_pb2 as _ +from envoy.annotations import deprecation_pb2 as _ +from envoy.annotations import resource_pb2 as _ +from udpa.annotations import migrate_pb2 as _ +from udpa.annotations import sensitive_pb2 as _ +from udpa.annotations import status_pb2 as _ +from udpa.annotations import versioning_pb2 as _ + + +# Set reserved_range in target_proto to reflect previous_reserved_range skipping +# skip_reserved_numbers. +def AdjustReservedRange(target_proto, previous_reserved_range, skip_reserved_numbers): + del target_proto.reserved_range[:] + for rr in previous_reserved_range: + # We can only handle singleton ranges today. 
+ assert ((rr.start == rr.end) or (rr.end == rr.start + 1)) + if rr.start not in skip_reserved_numbers: + target_proto.reserved_range.add().MergeFrom(rr) + + +# Merge active/shadow EnumDescriptorProtos to a fresh target EnumDescriptorProto. +def MergeActiveShadowEnum(active_proto, shadow_proto, target_proto): + target_proto.MergeFrom(active_proto) + shadow_values = {v.name: v for v in shadow_proto.value} + skip_reserved_numbers = [] + # For every reserved name, check to see if it's in the shadow, and if so, + # reintroduce in target_proto. + del target_proto.reserved_name[:] + for n in active_proto.reserved_name: + hidden_n = 'hidden_envoy_deprecated_' + n + if hidden_n in shadow_values: + v = shadow_values[hidden_n] + skip_reserved_numbers.append(v.number) + target_proto.value.add().MergeFrom(v) + else: + target_proto.reserved_name.append(n) + AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers) + # Special fixup for deprecation of default enum values. + for tv in target_proto.value: + if tv.name == 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE': + for sv in shadow_proto.value: + if sv.number == tv.number: + assert (sv.number == 0) + tv.CopyFrom(sv) + + +# Merge active/shadow DescriptorProtos to a fresh target DescriptorProto. +def MergeActiveShadowMessage(active_proto, shadow_proto, target_proto): + target_proto.MergeFrom(active_proto) + shadow_fields = {f.name: f for f in shadow_proto.field} + skip_reserved_numbers = [] + # For every reserved name, check to see if it's in the shadow, and if so, + # reintroduce in target_proto. + del target_proto.reserved_name[:] + for n in active_proto.reserved_name: + hidden_n = 'hidden_envoy_deprecated_' + n + if hidden_n in shadow_fields: + f = shadow_fields[hidden_n] + skip_reserved_numbers.append(f.number) + missing_field = target_proto.field.add() + missing_field.MergeFrom(f) + # oneof fields from the shadow need to have their index set to the + # corresponding index in active/target_proto. 
+ if missing_field.HasField('oneof_index'): + oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name + missing_oneof_index = None + for oneof_index, oneof_decl in enumerate(active_proto.oneof_decl): + if oneof_decl.name == oneof_name: + missing_oneof_index = oneof_index + assert (missing_oneof_index is not None) + missing_field.oneof_index = missing_oneof_index + else: + target_proto.reserved_name.append(n) + # protoprint.py expects that oneof fields are consecutive, so need to sort for + # this. + if len(active_proto.oneof_decl) > 0: + fields = copy.deepcopy(target_proto.field) + fields.sort(key=lambda f: f.oneof_index if f.HasField('oneof_index') else -1) + del target_proto.field[:] + for f in fields: + target_proto.field.append(f) + AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers) + # Visit nested message types + del target_proto.nested_type[:] + shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type} + for msg in active_proto.nested_type: + MergeActiveShadowMessage(msg, shadow_msgs[msg.name], target_proto.nested_type.add()) + # Visit nested enum types + del target_proto.enum_type[:] + shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type} + for enum in active_proto.enum_type: + MergeActiveShadowEnum(enum, shadow_enums[enum.name], target_proto.enum_type.add()) + # Ensure target has any deprecated sub-message types in case they are needed. + active_msg_names = set([msg.name for msg in active_proto.nested_type]) + for msg in shadow_proto.nested_type: + if msg.name not in active_msg_names: + target_proto.nested_type.add().MergeFrom(msg) + + +# Merge active/shadow FileDescriptorProtos, returning the resulting FileDescriptorProto. 
+def MergeActiveShadowFile(active_file_proto, shadow_file_proto): + target_file_proto = copy.deepcopy(active_file_proto) + # Visit message types + del target_file_proto.message_type[:] + shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type} + for msg in active_file_proto.message_type: + MergeActiveShadowMessage(msg, shadow_msgs[msg.name], target_file_proto.message_type.add()) + # Visit enum types + del target_file_proto.enum_type[:] + shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type} + for enum in active_file_proto.enum_type: + MergeActiveShadowEnum(enum, shadow_enums[enum.name], target_file_proto.enum_type.add()) + # Ensure target has any deprecated message types in case they are needed. + active_msg_names = set([msg.name for msg in active_file_proto.message_type]) + for msg in shadow_file_proto.message_type: + if msg.name not in active_msg_names: + target_file_proto.message_type.add().MergeFrom(msg) + return target_file_proto + + +if __name__ == '__main__': + active_src, shadow_src, dst = sys.argv[1:] + active_proto = descriptor_pb2.FileDescriptorProto() + text_format.Merge(pathlib.Path(active_src).read_text(), active_proto) + shadow_proto = descriptor_pb2.FileDescriptorProto() + text_format.Merge(pathlib.Path(shadow_src).read_text(), shadow_proto) + pathlib.Path(dst).write_text(str(MergeActiveShadowFile(active_proto, shadow_proto))) diff --git a/tools/protoxform/merge_active_shadow_test.py b/tools/protoxform/merge_active_shadow_test.py new file mode 100644 index 000000000000..8f7c98d4fa7b --- /dev/null +++ b/tools/protoxform/merge_active_shadow_test.py @@ -0,0 +1,241 @@ +import unittest + +import merge_active_shadow + +from google.protobuf import descriptor_pb2 +from google.protobuf import text_format + + +class MergeActiveShadowTest(unittest.TestCase): + + # Poor man's text proto equivalence. Tensorflow has better tools for this, + # i.e. assertProto2Equal. 
+ def assertTextProtoEq(self, lhs, rhs): + self.assertMultiLineEqual(lhs.strip(), rhs.strip()) + + def testAdjustReservedRange(self): + """AdjustReservedRange removes specified skip_reserved_numbers.""" + desc_pb_text = """ +reserved_range { + start: 41 + end: 41 +} +reserved_range { + start: 42 + end: 42 +} +reserved_range { + start: 43 + end: 44 +} +reserved_range { + start: 50 + end: 51 +} + """ + desc = descriptor_pb2.DescriptorProto() + text_format.Merge(desc_pb_text, desc) + target = descriptor_pb2.DescriptorProto() + merge_active_shadow.AdjustReservedRange(target, desc.reserved_range, [42, 43]) + target_pb_text = """ +reserved_range { + start: 41 + end: 41 +} +reserved_range { + start: 50 + end: 51 +} + """ + self.assertTextProtoEq(target_pb_text, str(target)) + + def testMergeActiveShadowEnum(self): + """MergeActiveShadowEnum recovers shadow values.""" + active_pb_text = """ +value { + number: 1 + name: "foo" +} +value { + number: 0 + name: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE" +} +value { + number: 3 + name: "bar" +} +reserved_name: "baz" +reserved_range { + start: 2 + end: 3 +} + """ + active_proto = descriptor_pb2.EnumDescriptorProto() + text_format.Merge(active_pb_text, active_proto) + shadow_pb_text = """ +value { + number: 1 + name: "foo" +} +value { + number: 0 + name: "wow" +} +value { + number: 3 + name: "bar" +} +value { + number: 2 + name: "hidden_envoy_deprecated_baz" +} +value { + number: 4 + name: "hidden_envoy_deprecated_huh" +} + """ + shadow_proto = descriptor_pb2.EnumDescriptorProto() + text_format.Merge(shadow_pb_text, shadow_proto) + target_proto = descriptor_pb2.EnumDescriptorProto() + merge_active_shadow.MergeActiveShadowEnum(active_proto, shadow_proto, target_proto) + target_pb_text = """ +value { + name: "foo" + number: 1 +} +value { + name: "wow" + number: 0 +} +value { + name: "bar" + number: 3 +} +value { + name: "hidden_envoy_deprecated_baz" + number: 2 +} + """ + self.assertTextProtoEq(target_pb_text, str(target_proto)) + + 
def testMergeActiveShadowMessage(self): + """MergeActiveShadowMessage recovers shadow fields with oneofs.""" + active_pb_text = """ +field { + number: 1 + name: "foo" +} +field { + number: 0 + name: "bar" + oneof_index: 2 +} +field { + number: 3 + name: "baz" +} +field { + number: 4 + name: "newbie" +} +reserved_name: "wow" +reserved_range { + start: 2 + end: 3 +} +oneof_decl { + name: "ign" +} +oneof_decl { + name: "ign2" +} +oneof_decl { + name: "some_oneof" +} + """ + active_proto = descriptor_pb2.DescriptorProto() + text_format.Merge(active_pb_text, active_proto) + shadow_pb_text = """ +field { + number: 1 + name: "foo" +} +field { + number: 0 + name: "bar" +} +field { + number: 3 + name: "baz" +} +field { + number: 2 + name: "hidden_envoy_deprecated_wow" + oneof_index: 0 +} +oneof_decl { + name: "some_oneof" +} + """ + shadow_proto = descriptor_pb2.DescriptorProto() + text_format.Merge(shadow_pb_text, shadow_proto) + target_proto = descriptor_pb2.DescriptorProto() + merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + target_pb_text = """ +field { + name: "foo" + number: 1 +} +field { + name: "baz" + number: 3 +} +field { + name: "newbie" + number: 4 +} +field { + name: "bar" + number: 0 + oneof_index: 2 +} +field { + name: "hidden_envoy_deprecated_wow" + number: 2 + oneof_index: 2 +} +oneof_decl { + name: "ign" +} +oneof_decl { + name: "ign2" +} +oneof_decl { + name: "some_oneof" +} + """ + self.assertTextProtoEq(target_pb_text, str(target_proto)) + + def testMergeActiveShadowMessageMissing(self): + """MergeActiveShadowMessage recovers missing messages from shadow.""" + active_proto = descriptor_pb2.DescriptorProto() + shadow_proto = descriptor_pb2.DescriptorProto() + shadow_proto.nested_type.add().name = 'foo' + target_proto = descriptor_pb2.DescriptorProto() + merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + self.assertEqual(target_proto.nested_type[0].name, 'foo') + + def 
testMergeActiveShadowFileMissing(self): + """MergeActiveShadowFile recovers missing messages from shadow.""" + active_proto = descriptor_pb2.FileDescriptorProto() + shadow_proto = descriptor_pb2.FileDescriptorProto() + shadow_proto.message_type.add().name = 'foo' + target_proto = descriptor_pb2.DescriptorProto() + target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto) + self.assertEqual(target_proto.message_type[0].name, 'foo') + + +# TODO(htuch): add some test for recursion. + +if __name__ == '__main__': + unittest.main() diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py index abd984c852cd..1be44af91acb 100644 --- a/tools/protoxform/migrate.py +++ b/tools/protoxform/migrate.py @@ -10,6 +10,7 @@ from envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 +from udpa.annotations import status_pb2 from google.api import annotations_pb2 ENVOY_API_TYPE_REGEX_STR = 'envoy_api_(msg|enum_value|field|enum)_([\w\.]+)' @@ -23,9 +24,11 @@ class UpgradeVisitor(visitor.Visitor): See visitor.Visitor for visitor method docs comments. """ - def __init__(self, typedb, envoy_internal_shadow): + def __init__(self, n, typedb, envoy_internal_shadow, package_version_status): + self._base_version = n self._typedb = typedb self._envoy_internal_shadow = envoy_internal_shadow + self._package_version_status = package_version_status def _UpgradedComment(self, c): @@ -74,7 +77,7 @@ def UpgradeType(match): return re.sub(ENVOY_COMMENT_WITH_TYPE_REGEX, UpgradeType, c) def _UpgradedPostMethod(self, m): - return re.sub(r'^/v2/', '/v3/', m) + return re.sub(r'^/v%d/' % self._base_version, '/v%d/' % (self._base_version + 1), m) # Upgraded type using canonical type naming, e.g. foo.bar. 
def _UpgradedTypeCanonical(self, t): @@ -212,6 +215,8 @@ def VisitFile(self, file_proto, type_context, services, msgs, enums): upgraded_proto.package = self._typedb.next_version_protos[upgraded_proto.name].qualified_package upgraded_proto.name = self._typedb.next_version_protos[upgraded_proto.name].proto_path upgraded_proto.options.ClearExtension(migrate_pb2.file_migrate) + upgraded_proto.options.Extensions[ + status_pb2.file_status].package_version_status = self._package_version_status # Upgrade comments. for location in upgraded_proto.source_code_info.location: location.leading_comments = self._UpgradedComment(location.leading_comments) @@ -231,15 +236,17 @@ def VisitFile(self, file_proto, type_context, services, msgs, enums): return upgraded_proto -def V3MigrationXform(envoy_internal_shadow, file_proto): - """Transform a FileDescriptorProto from v2[alpha\d] to v3. +def VersionUpgradeXform(n, envoy_internal_shadow, file_proto, params): + """Transform a FileDescriptorProto from vN[alpha\d] to v(N+1). Args: + n: version N to upgrade from. envoy_internal_shadow: generate a shadow for Envoy internal use containing deprecated fields. - file_proto: v2[alpha\d] FileDescriptorProto message. + file_proto: vN[alpha\d] FileDescriptorProto message. + params: plugin parameters. Returns: - v3 FileDescriptorProto message. + v(N+1) FileDescriptorProto message. """ # Load type database. typedb = utils.GetTypeDb() @@ -248,4 +255,15 @@ def V3MigrationXform(envoy_internal_shadow, file_proto): file_proto.name]: return None # Otherwise, this .proto needs upgrading, do it. - return traverse.TraverseFile(file_proto, UpgradeVisitor(typedb, envoy_internal_shadow)) + freeze = 'extra_args' in params and params['extra_args'] == 'freeze' + existing_pkg_version_status = file_proto.options.Extensions[ + status_pb2.file_status].package_version_status + # Normally, we are generating the NEXT_MAJOR_VERSION_CANDIDATE. 
However, if + # freezing and previously this was the active major version, the migrated + # version is now the ACTIVE version. + if freeze and existing_pkg_version_status == status_pb2.ACTIVE: + package_version_status = status_pb2.ACTIVE + else: + package_version_status = status_pb2.NEXT_MAJOR_VERSION_CANDIDATE + return traverse.TraverseFile( + file_proto, UpgradeVisitor(n, typedb, envoy_internal_shadow, package_version_status)) diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py new file mode 100755 index 000000000000..51bb9c573795 --- /dev/null +++ b/tools/protoxform/protoprint.py @@ -0,0 +1,611 @@ +# FileDescriptorProtos pretty-printer tool. +# +# protoprint.py provides the canonical .proto formatting for the Envoy APIs. +# +# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto +# for the underlying protos mentioned in this file. +# +# Usage: protoprint.py + +from collections import deque +import copy +import functools +import io +import os +import pathlib +import re +import subprocess +import sys + +from tools.api_proto_plugin import annotations +from tools.api_proto_plugin import plugin +from tools.api_proto_plugin import traverse +from tools.api_proto_plugin import visitor +from tools.protoxform import options as protoxform_options +from tools.protoxform import utils +from tools.type_whisperer import type_whisperer +from tools.type_whisperer.types_pb2 import Types + +from google.protobuf import descriptor_pb2 +from google.protobuf import text_format + +# Note: we have to include those proto definitions to make FormatOptions work, +# this also serves as whitelist of extended options. 
+from google.api import annotations_pb2 as _ +from validate import validate_pb2 as _ +from envoy.annotations import deprecation_pb2 as _ +from envoy.annotations import resource_pb2 +from udpa.annotations import migrate_pb2 +from udpa.annotations import sensitive_pb2 as _ +from udpa.annotations import status_pb2 + +NEXT_FREE_FIELD_MIN = 5 + + +class ProtoPrintError(Exception): + """Base error class for the protoprint module.""" + + +def ExtractClangProtoStyle(clang_format_text): + """Extract a key:value dictionary for proto formatting. + + Args: + clang_format_text: text from a .clang-format file. + + Returns: + key:value dictionary suitable for passing to clang-format --style. + """ + lang = None + format_dict = {} + for line in clang_format_text.split('\n'): + if lang is None or lang != 'Proto': + match = re.match('Language:\s+(\w+)', line) + if match: + lang = match.group(1) + continue + match = re.match('(\w+):\s+(\w+)', line) + if match: + key, value = match.groups() + format_dict[key] = value + else: + break + return str(format_dict) + + +# Ensure we are using the canonical clang-format proto style. +CLANG_FORMAT_STYLE = ExtractClangProtoStyle(pathlib.Path('.clang-format').read_text()) + + +def ClangFormat(contents): + """Run proto-style oriented clang-format over given string. + + Args: + contents: a string with proto contents. + + Returns: + clang-formatted string + """ + return subprocess.run( + ['clang-format', + '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'], + input=contents.encode('utf-8'), + stdout=subprocess.PIPE).stdout + + +def FormatBlock(block): + """Append \n to a .proto section (e.g. + + comment, message definition, etc.) if non-empty. + + Args: + block: a string representing the section. + + Returns: + A string with appropriate whitespace. + """ + if block.strip(): + return block + '\n' + return '' + + +def FormatComments(comments): + """Format a list of comment blocks from SourceCodeInfo. 
+ + Prefixes // to each line, separates blocks by spaces. + + Args: + comments: a list of blocks, each block is a list of strings representing + lines in each block. + + Returns: + A string representing the formatted comment blocks. + """ + + # TODO(htuch): not sure why this is needed, but clang-format does some weird + # stuff with // comment indents when we have these trailing \ + def FixupTrailingBackslash(s): + return s[:-1].rstrip() if s.endswith('\\') else s + + comments = '\n\n'.join( + '\n'.join(['//%s' % FixupTrailingBackslash(line) + for line in comment.split('\n')[:-1]]) + for comment in comments) + return FormatBlock(comments) + + +def CreateNextFreeFieldXform(msg_proto): + """Return the next free field number annotation transformer of a message. + + Args: + msg_proto: DescriptorProto for message. + + Returns: + the next free field number annotation transformer. + """ + next_free = max( + sum([ + [f.number + 1 for f in msg_proto.field], + [rr.end for rr in msg_proto.reserved_range], + [ex.end for ex in msg_proto.extension_range], + ], [1])) + return lambda _: next_free if next_free > NEXT_FREE_FIELD_MIN else None + + +def FormatTypeContextComments(type_context, annotation_xforms=None): + """Format the leading/trailing comments in a given TypeContext. + + Args: + type_context: contextual information for message/enum/field. + annotation_xforms: a dict of transformers for annotations in leading + comment. + + Returns: + Tuple of formatted leading and trailing comment blocks. + """ + leading_comment = type_context.leading_comment + if annotation_xforms: + leading_comment = leading_comment.getCommentWithTransforms(annotation_xforms) + leading = FormatComments(list(type_context.leading_detached_comments) + [leading_comment.raw]) + trailing = FormatBlock(FormatComments([type_context.trailing_comment])) + return leading, trailing + + +def FormatHeaderFromFile(source_code_info, file_proto, empty_file): + """Format proto header. 
+ + Args: + source_code_info: SourceCodeInfo object. + file_proto: FileDescriptorProto for file. + empty_file: are there no message/enum/service defs in file? + + Returns: + Formatted proto header as a string. + """ + # Load the type database. + typedb = utils.GetTypeDb() + # Figure out type dependencies in this .proto. + types = Types() + text_format.Merge(traverse.TraverseFile(file_proto, type_whisperer.TypeWhispererVisitor()), types) + type_dependencies = sum([list(t.type_dependencies) for t in types.types.values()], []) + for service in file_proto.service: + for m in service.method: + type_dependencies.extend([m.input_type[1:], m.output_type[1:]]) + # Determine the envoy/ import paths from type deps. + envoy_proto_paths = set( + typedb.types[t].proto_path + for t in type_dependencies + if t.startswith('envoy.') and typedb.types[t].proto_path != file_proto.name) + + def CamelCase(s): + return ''.join(t.capitalize() for t in re.split('[\._]', s)) + + package_line = 'package %s;\n' % file_proto.package + file_block = '\n'.join(['syntax = "proto3";\n', package_line]) + + options = descriptor_pb2.FileOptions() + options.java_outer_classname = CamelCase(os.path.basename(file_proto.name)) + options.java_multiple_files = True + options.java_package = 'io.envoyproxy.' + file_proto.package + + # This is a workaround for C#/Ruby namespace conflicts between packages and + # objects, see https://github.com/envoyproxy/envoy/pull/3854. + # TODO(htuch): remove once v3 fixes this naming issue in + # https://github.com/envoyproxy/envoy/issues/8120. 
+ if file_proto.package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']: + qualified_package = '.'.join(s.capitalize() for s in file_proto.package.split('.')) + 'NS' + options.csharp_namespace = qualified_package + options.ruby_package = qualified_package + + if file_proto.service: + options.java_generic_services = True + + if file_proto.options.HasExtension(migrate_pb2.file_migrate): + options.Extensions[migrate_pb2.file_migrate].CopyFrom( + file_proto.options.Extensions[migrate_pb2.file_migrate]) + + if file_proto.options.HasExtension( + status_pb2.file_status) and file_proto.package.endswith('alpha'): + options.Extensions[status_pb2.file_status].CopyFrom( + file_proto.options.Extensions[status_pb2.file_status]) + + if not empty_file: + options.Extensions[ + status_pb2.file_status].package_version_status = file_proto.options.Extensions[ + status_pb2.file_status].package_version_status + + options_block = FormatOptions(options) + + requires_versioning_import = any( + protoxform_options.GetVersioningAnnotation(m.options) for m in file_proto.message_type) + + envoy_imports = list(envoy_proto_paths) + google_imports = [] + infra_imports = [] + misc_imports = [] + public_imports = [] + + for idx, d in enumerate(file_proto.dependency): + if idx in file_proto.public_dependency: + public_imports.append(d) + continue + elif d.startswith('envoy/annotations') or d.startswith('udpa/annotations'): + infra_imports.append(d) + elif d.startswith('envoy/'): + # We ignore existing envoy/ imports, since these are computed explicitly + # from type_dependencies. + pass + elif d.startswith('google/'): + google_imports.append(d) + elif d.startswith('validate/'): + infra_imports.append(d) + elif d in ['udpa/annotations/versioning.proto', 'udpa/annotations/status.proto']: + # Skip, we decide to add this based on requires_versioning_import and options. 
+ pass + else: + misc_imports.append(d) + + if options.HasExtension(status_pb2.file_status): + infra_imports.append('udpa/annotations/status.proto') + + if requires_versioning_import: + infra_imports.append('udpa/annotations/versioning.proto') + + def FormatImportBlock(xs): + if not xs: + return '' + return FormatBlock('\n'.join(sorted('import "%s";' % x for x in set(xs) if x))) + + def FormatPublicImportBlock(xs): + if not xs: + return '' + return FormatBlock('\n'.join(sorted('import public "%s";' % x for x in xs))) + + import_block = '\n'.join( + map(FormatImportBlock, [envoy_imports, google_imports, misc_imports, infra_imports])) + import_block += '\n' + FormatPublicImportBlock(public_imports) + comment_block = FormatComments(source_code_info.file_level_comments) + + return ''.join(map(FormatBlock, [file_block, import_block, options_block, comment_block])) + + +def NormalizeFieldTypeName(type_context, field_fqn): + """Normalize a fully qualified field type name, e.g. + + .envoy.foo.bar is normalized to foo.bar. + + Considers type context to minimize type prefix. + + Args: + field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. + type_context: contextual information for message/enum/field. + + Returns: + Normalized type name as a string. + """ + if field_fqn.startswith('.'): + # Let's say we have type context namespace a.b.c.d.e and the type we're + # trying to normalize is a.b.d.e. We take (from the end) on package fragment + # at a time, and apply the inner-most evaluation that protoc performs to see + # if we evaluate to the fully qualified type. If so, we're done. It's not + # sufficient to compute common prefix and drop that, since in the above + # example the normalized type name would be d.e, which proto resolves inner + # most as a.b.c.d.e (bad) instead of the intended a.b.d.e. 
+ field_fqn_splits = field_fqn[1:].split('.') + type_context_splits = type_context.name.split('.')[:-1] + remaining_field_fqn_splits = deque(field_fqn_splits[:-1]) + normalized_splits = deque([field_fqn_splits[-1]]) + + def EquivalentInTypeContext(splits): + type_context_splits_tmp = deque(type_context_splits) + while type_context_splits_tmp: + # If we're in a.b.c and the FQN is a.d.Foo, we want to return true once + # we have type_context_splits_tmp as [a] and splits as [d, Foo]. + if list(type_context_splits_tmp) + list(splits) == field_fqn_splits: + return True + # If we're in a.b.c.d.e.f and the FQN is a.b.d.e.Foo, we want to return True + # once we have type_context_splits_tmp as [a] and splits as [b, d, e, Foo], but + # not when type_context_splits_tmp is [a, b, c] and FQN is [d, e, Foo]. + if len(splits) > 1 and '.'.join(type_context_splits_tmp).endswith('.'.join( + list(splits)[:-1])): + return False + type_context_splits_tmp.pop() + return False + + while remaining_field_fqn_splits and not EquivalentInTypeContext(normalized_splits): + normalized_splits.appendleft(remaining_field_fqn_splits.pop()) + + # `extensions` is a keyword in proto2, and protoc will throw error if a type name + # starts with `extensions.`. + if normalized_splits[0] == 'extensions': + normalized_splits.appendleft(remaining_field_fqn_splits.pop()) + + return '.'.join(normalized_splits) + return field_fqn + + +def TypeNameFromFQN(fqn): + return fqn[1:] + + +def FormatFieldType(type_context, field): + """Format a FieldDescriptorProto type description. + + Args: + type_context: contextual information for message/enum/field. + field: FieldDescriptor proto. + + Returns: + Formatted proto field type as string. 
+ """ + label = 'repeated ' if field.label == field.LABEL_REPEATED else '' + type_name = label + NormalizeFieldTypeName(type_context, field.type_name) + + if field.type == field.TYPE_MESSAGE: + if type_context.map_typenames and TypeNameFromFQN( + field.type_name) in type_context.map_typenames: + return 'map<%s, %s>' % tuple( + map(functools.partial(FormatFieldType, type_context), + type_context.map_typenames[TypeNameFromFQN(field.type_name)])) + return type_name + elif field.type_name: + return type_name + + pretty_type_names = { + field.TYPE_DOUBLE: 'double', + field.TYPE_FLOAT: 'float', + field.TYPE_INT32: 'int32', + field.TYPE_SFIXED32: 'int32', + field.TYPE_SINT32: 'int32', + field.TYPE_FIXED32: 'uint32', + field.TYPE_UINT32: 'uint32', + field.TYPE_INT64: 'int64', + field.TYPE_SFIXED64: 'int64', + field.TYPE_SINT64: 'int64', + field.TYPE_FIXED64: 'uint64', + field.TYPE_UINT64: 'uint64', + field.TYPE_BOOL: 'bool', + field.TYPE_STRING: 'string', + field.TYPE_BYTES: 'bytes', + } + if field.type in pretty_type_names: + return label + pretty_type_names[field.type] + raise ProtoPrintError('Unknown field type ' + str(field.type)) + + +def FormatServiceMethod(type_context, method): + """Format a service MethodDescriptorProto. + + Args: + type_context: contextual information for method. + method: MethodDescriptorProto proto. + + Returns: + Formatted service method as string. + """ + + def FormatStreaming(s): + return 'stream ' if s else '' + + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % ( + leading_comment, method.name, trailing_comment, FormatStreaming( + method.client_streaming), NormalizeFieldTypeName( + type_context, method.input_type), FormatStreaming(method.server_streaming), + NormalizeFieldTypeName(type_context, method.output_type), FormatOptions(method.options)) + + +def FormatField(type_context, field): + """Format FieldDescriptorProto as a proto field. 
+ + Args: + type_context: contextual information for message/enum/field. + field: FieldDescriptor proto. + + Returns: + Formatted proto field as a string. + """ + if protoxform_options.HasHideOption(field.options): + return '' + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + + return '%s%s %s = %d%s;\n%s' % (leading_comment, FormatFieldType(type_context, field), field.name, + field.number, FormatOptions(field.options), trailing_comment) + + +def FormatEnumValue(type_context, value): + """Format a EnumValueDescriptorProto as a proto enum value. + + Args: + type_context: contextual information for message/enum/field. + value: EnumValueDescriptorProto. + + Returns: + Formatted proto enum value as a string. + """ + if protoxform_options.HasHideOption(value.options): + return '' + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + formatted_annotations = FormatOptions(value.options) + return '%s%s = %d%s;\n%s' % (leading_comment, value.name, value.number, formatted_annotations, + trailing_comment) + + +def TextFormatValue(field, value): + """Format the value as protobuf text format + + Args: + field: a FieldDescriptor that describes the field + value: the value stored in the field + + Returns: + value in protobuf text format + """ + out = io.StringIO() + text_format.PrintFieldValue(field, value, out) + return out.getvalue() + + +def FormatOptions(options): + """Format *Options (e.g. + + MessageOptions, FieldOptions) message. + + Args: + options: A *Options (e.g. MessageOptions, FieldOptions) message. + + Returns: + Formatted options as a string. 
+ """ + + formatted_options = [] + for option_descriptor, option_value in sorted(options.ListFields(), key=lambda x: x[0].number): + option_name = '({})'.format( + option_descriptor.full_name) if option_descriptor.is_extension else option_descriptor.name + if option_descriptor.message_type and option_descriptor.label != option_descriptor.LABEL_REPEATED: + formatted_options.extend([ + '{}.{} = {}'.format(option_name, subfield.name, TextFormatValue(subfield, value)) + for subfield, value in option_value.ListFields() + ]) + else: + formatted_options.append('{} = {}'.format(option_name, + TextFormatValue(option_descriptor, option_value))) + + if formatted_options: + if options.DESCRIPTOR.name in ('EnumValueOptions', 'FieldOptions'): + return '[{}]'.format(','.join(formatted_options)) + else: + return FormatBlock(''.join( + 'option {};\n'.format(formatted_option) for formatted_option in formatted_options)) + return '' + + +def FormatReserved(enum_or_msg_proto): + """Format reserved values/names in a [Enum]DescriptorProto. + + Args: + enum_or_msg_proto: [Enum]DescriptorProto message. + + Returns: + Formatted enum_or_msg_proto as a string. + """ + rrs = copy.deepcopy(enum_or_msg_proto.reserved_range) + # Fixups for singletons that don't seem to always have [inclusive, exclusive) + # format when parsed by protoc. + for rr in rrs: + if rr.start == rr.end: + rr.end += 1 + reserved_fields = FormatBlock( + 'reserved %s;\n' % + ','.join(map(str, sum([list(range(rr.start, rr.end)) for rr in rrs], [])))) if rrs else '' + if enum_or_msg_proto.reserved_name: + reserved_fields += FormatBlock('reserved %s;\n' % + ', '.join('"%s"' % n for n in enum_or_msg_proto.reserved_name)) + return reserved_fields + + +class ProtoFormatVisitor(visitor.Visitor): + """Visitor to generate a proto representation from a FileDescriptor proto. + + See visitor.Visitor for visitor method docs comments. 
+ """ + + def VisitService(self, service_proto, type_context): + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + methods = '\n'.join( + FormatServiceMethod(type_context.ExtendMethod(index, m.name), m) + for index, m in enumerate(service_proto.method)) + options = FormatBlock(FormatOptions(service_proto.options)) + return '%sservice %s {\n%s%s%s\n}\n' % (leading_comment, service_proto.name, options, + trailing_comment, methods) + + def VisitEnum(self, enum_proto, type_context): + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + formatted_options = FormatOptions(enum_proto.options) + reserved_fields = FormatReserved(enum_proto) + values = [ + FormatEnumValue(type_context.ExtendField(index, value.name), value) + for index, value in enumerate(enum_proto.value) + ] + joined_values = ('\n' if any('//' in v for v in values) else '').join(values) + return '%senum %s {\n%s%s%s%s\n}\n' % (leading_comment, enum_proto.name, trailing_comment, + formatted_options, reserved_fields, joined_values) + + def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): + # Skip messages synthesized to represent map types. + if msg_proto.options.map_entry: + return '' + if protoxform_options.HasHideOption(msg_proto.options): + return '' + annotation_xforms = { + annotations.NEXT_FREE_FIELD_ANNOTATION: CreateNextFreeFieldXform(msg_proto) + } + leading_comment, trailing_comment = FormatTypeContextComments(type_context, annotation_xforms) + formatted_options = FormatOptions(msg_proto.options) + formatted_enums = FormatBlock('\n'.join(nested_enums)) + formatted_msgs = FormatBlock('\n'.join(nested_msgs)) + reserved_fields = FormatReserved(msg_proto) + # Recover the oneof structure. This needs some extra work, since + # DescriptorProto just gives use fields and a oneof_index that can allow + # recovery of the original oneof placement. 
+ fields = '' + oneof_index = None + for index, field in enumerate(msg_proto.field): + if oneof_index is not None: + if not field.HasField('oneof_index') or field.oneof_index != oneof_index: + fields += '}\n\n' + oneof_index = None + if oneof_index is None and field.HasField('oneof_index'): + oneof_index = field.oneof_index + assert (oneof_index < len(msg_proto.oneof_decl)) + oneof_proto = msg_proto.oneof_decl[oneof_index] + oneof_leading_comment, oneof_trailing_comment = FormatTypeContextComments( + type_context.ExtendOneof(oneof_index, field.name)) + fields += '%soneof %s {\n%s%s' % (oneof_leading_comment, oneof_proto.name, + oneof_trailing_comment, FormatOptions( + oneof_proto.options)) + fields += FormatBlock(FormatField(type_context.ExtendField(index, field.name), field)) + if oneof_index is not None: + fields += '}\n\n' + return '%smessage %s {\n%s%s%s%s%s%s\n}\n' % (leading_comment, msg_proto.name, trailing_comment, + formatted_options, formatted_enums, + formatted_msgs, reserved_fields, fields) + + def VisitFile(self, file_proto, type_context, services, msgs, enums): + empty_file = len(services) == 0 and len(enums) == 0 and len(msgs) == 0 + header = FormatHeaderFromFile(type_context.source_code_info, file_proto, empty_file) + formatted_services = FormatBlock('\n'.join(services)) + formatted_enums = FormatBlock('\n'.join(enums)) + formatted_msgs = FormatBlock('\n'.join(msgs)) + return ClangFormat(header + formatted_services + formatted_enums + formatted_msgs) + + +if __name__ == '__main__': + proto_desc_path = sys.argv[1] + file_proto = descriptor_pb2.FileDescriptorProto() + input_text = pathlib.Path(proto_desc_path).read_text() + if not input_text: + sys.exit(0) + text_format.Merge(input_text, file_proto) + dst_path = pathlib.Path(sys.argv[2]) + utils.LoadTypeDb(sys.argv[3]) + dst_path.write_bytes(traverse.TraverseFile(file_proto, ProtoFormatVisitor())) diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl index 
eb04d511a04c..d3ea80534896 100644 --- a/tools/protoxform/protoxform.bzl +++ b/tools/protoxform/protoxform.bzl @@ -1,7 +1,17 @@ load("//tools/api_proto_plugin:plugin.bzl", "api_proto_plugin_aspect", "api_proto_plugin_impl") def _protoxform_impl(target, ctx): - return api_proto_plugin_impl(target, ctx, "proto", "protoxform", [".v2.proto", ".v3.proto", ".v3.envoy_internal.proto"]) + return api_proto_plugin_impl( + target, + ctx, + "proto", + "protoxform", + [ + ".active_or_frozen.proto", + ".next_major_version_candidate.proto", + ".next_major_version_candidate.envoy_internal.proto", + ], + ) # Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) # that can be invoked from the CLI to perform API transforms via //tools/protoxform for diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index ad4a13938769..9331877aa17f 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -1,37 +1,19 @@ -# protoc plugin to map from FileDescriptorProtos to a canonicaly formatted -# proto. +# protoc plugin to map from FileDescriptorProtos to intermediate form # -# protoxform is currently only a formatting tool, but will act as the basis for -# vN -> v(N+1) API migration tooling, allowing for things like deprecated field -# removal, package renaming, field movement, providing both .proto and .cc code -# generation to support automation of Envoy API version translation. -# -# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto -# for the underlying protos mentioned in this file. +# protoxform takes a source FileDescriptorProto and generates active/next major +# version candidate FileDescriptorProtos. The resulting FileDescriptorProtos are +# then later processed by proto_sync.py, which invokes protoprint.py to format. 
-from collections import deque +import copy import functools -import io -import os -import pathlib -import re -import subprocess -from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin -from tools.api_proto_plugin import traverse from tools.api_proto_plugin import visitor from tools.protoxform import migrate -from tools.protoxform import options as protoxform_options from tools.protoxform import utils -from tools.type_whisperer import type_whisperer -from tools.type_whisperer.types_pb2 import Types - -from google.protobuf import descriptor_pb2 -from google.protobuf import text_format -# Note: we have to include those proto definitions to make FormatOptions work, -# this also serves as whitelist of extended options. +# Note: we have to include those proto definitions to ensure we don't lose these +# during FileDescriptorProto printing. from google.api import annotations_pb2 as _ from validate import validate_pb2 as _ from envoy.annotations import deprecation_pb2 as _ @@ -40,571 +22,75 @@ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 -NEXT_FREE_FIELD_MIN = 5 - class ProtoXformError(Exception): """Base error class for the protoxform module.""" -def ExtractClangProtoStyle(clang_format_text): - """Extract a key:value dictionary for proto formatting. - - Args: - clang_format_text: text from a .clang-format file. - - Returns: - key:value dictionary suitable for passing to clang-format --style. - """ - lang = None - format_dict = {} - for line in clang_format_text.split('\n'): - if lang is None or lang != 'Proto': - match = re.match('Language:\s+(\w+)', line) - if match: - lang = match.group(1) - continue - match = re.match('(\w+):\s+(\w+)', line) - if match: - key, value = match.groups() - format_dict[key] = value - else: - break - return str(format_dict) - - -# Ensure we are using the canonical clang-format proto style. 
-CLANG_FORMAT_STYLE = ExtractClangProtoStyle( - pathlib.Path(os.getenv('RUNFILES_DIR'), 'envoy/.clang-format').read_text()) - - -def ClangFormat(contents): - """Run proto-style oriented clang-format over given string. - - Args: - contents: a string with proto contents. - - Returns: - clang-formatted string - """ - return subprocess.run( - ['clang-format', - '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'], - input=contents.encode('utf-8'), - stdout=subprocess.PIPE).stdout - - -def FormatBlock(block): - """Append \n to a .proto section (e.g. - - comment, message definition, etc.) if non-empty. - - Args: - block: a string representing the section. - - Returns: - A string with appropriate whitespace. - """ - if block.strip(): - return block + '\n' - return '' - - -def FormatComments(comments): - """Format a list of comment blocks from SourceCodeInfo. - - Prefixes // to each line, separates blocks by spaces. - - Args: - comments: a list of blocks, each block is a list of strings representing - lines in each block. - - Returns: - A string reprenting the formatted comment blocks. - """ - - # TODO(htuch): not sure why this is needed, but clang-format does some weird - # stuff with // comment indents when we have these trailing \ - def FixupTrailingBackslash(s): - return s[:-1].rstrip() if s.endswith('\\') else s - - comments = '\n\n'.join( - '\n'.join(['//%s' % FixupTrailingBackslash(line) - for line in comment.split('\n')[:-1]]) - for comment in comments) - return FormatBlock(comments) - - -def CreateNextFreeFieldXform(msg_proto): - """Return the next free field number annotation transformer of a message. - - Args: - msg_proto: DescriptorProto for message. - - Returns: - the next free field number annotation transformer. 
- """ - next_free = max( - sum([ - [f.number + 1 for f in msg_proto.field], - [rr.end for rr in msg_proto.reserved_range], - [ex.end for ex in msg_proto.extension_range], - ], [1])) - return lambda _: next_free if next_free > NEXT_FREE_FIELD_MIN else None - - -def FormatTypeContextComments(type_context, annotation_xforms=None): - """Format the leading/trailing comments in a given TypeContext. - - Args: - type_context: contextual information for message/enum/field. - annotation_xforms: a dict of transformers for annotations in leading - comment. - - Returns: - Tuple of formatted leading and trailing comment blocks. - """ - leading_comment = type_context.leading_comment - if annotation_xforms: - leading_comment = leading_comment.getCommentWithTransforms(annotation_xforms) - leading = FormatComments(list(type_context.leading_detached_comments) + [leading_comment.raw]) - trailing = FormatBlock(FormatComments([type_context.trailing_comment])) - return leading, trailing - - -def FormatHeaderFromFile(source_code_info, file_proto): - """Format proto header. - - Args: - source_code_info: SourceCodeInfo object. - file_proto: FileDescriptorProto for file. - - Returns: - Formatted proto header as a string. - """ - # Load the type database. - typedb = utils.GetTypeDb() - # Figure out type dependencies in this .proto. - types = Types() - text_format.Merge(traverse.TraverseFile(file_proto, type_whisperer.TypeWhispererVisitor()), types) - type_dependencies = sum([list(t.type_dependencies) for t in types.types.values()], []) - for service in file_proto.service: - for m in service.method: - type_dependencies.extend([m.input_type[1:], m.output_type[1:]]) - # Determine the envoy/ import paths from type deps. 
- envoy_proto_paths = set( - typedb.types[t].proto_path - for t in type_dependencies - if t.startswith('envoy.') and typedb.types[t].proto_path != file_proto.name) - - def CamelCase(s): - return ''.join(t.capitalize() for t in re.split('[\._]', s)) - - package_line = 'package %s;\n' % file_proto.package - file_block = '\n'.join(['syntax = "proto3";\n', package_line]) - - options = descriptor_pb2.FileOptions() - options.java_outer_classname = CamelCase(os.path.basename(file_proto.name)) - options.java_multiple_files = True - options.java_package = 'io.envoyproxy.' + file_proto.package - - # This is a workaround for C#/Ruby namespace conflicts between packages and - # objects, see https://github.com/envoyproxy/envoy/pull/3854. - # TODO(htuch): remove once v3 fixes this naming issue in - # https://github.com/envoyproxy/envoy/issues/8120. - if file_proto.package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']: - qualified_package = '.'.join(s.capitalize() for s in file_proto.package.split('.')) + 'NS' - options.csharp_namespace = qualified_package - options.ruby_package = qualified_package - - if file_proto.service: - options.java_generic_services = True - - if file_proto.options.HasExtension(migrate_pb2.file_migrate): - options.Extensions[migrate_pb2.file_migrate].CopyFrom( - file_proto.options.Extensions[migrate_pb2.file_migrate]) - - if file_proto.options.HasExtension( - status_pb2.file_status) and file_proto.package.endswith('alpha'): - options.Extensions[status_pb2.file_status].CopyFrom( - file_proto.options.Extensions[status_pb2.file_status]) - - options_block = FormatOptions(options) - - requires_versioning_import = any( - protoxform_options.GetVersioningAnnotation(m.options) for m in file_proto.message_type) - - envoy_imports = list(envoy_proto_paths) - google_imports = [] - infra_imports = [] - misc_imports = [] - public_imports = [] - - for idx, d in enumerate(file_proto.dependency): - if idx in file_proto.public_dependency: - public_imports.append(d) - 
continue - elif d in [ - 'envoy/annotations/resource.proto', - 'envoy/annotations/deprecation.proto', - 'udpa/annotations/migrate.proto', - ]: - infra_imports.append(d) - elif d.startswith('envoy/'): - # We ignore existing envoy/ imports, since these are computed explicitly - # from type_dependencies. - pass - elif d.startswith('google/'): - google_imports.append(d) - elif d.startswith('validate/'): - infra_imports.append(d) - elif d in ['udpa/annotations/versioning.proto', 'udpa/annotations/status.proto']: - # Skip, we decide to add this based on requires_versioning_import and options. - pass - else: - misc_imports.append(d) - - if options.HasExtension(status_pb2.file_status): - misc_imports.append('udpa/annotations/status.proto') - - if requires_versioning_import: - misc_imports.append('udpa/annotations/versioning.proto') - - def FormatImportBlock(xs): - if not xs: - return '' - return FormatBlock('\n'.join(sorted('import "%s";' % x for x in xs))) - - def FormatPublicImportBlock(xs): - if not xs: - return '' - return FormatBlock('\n'.join(sorted('import public "%s";' % x for x in xs))) - - import_block = '\n'.join( - map(FormatImportBlock, [envoy_imports, google_imports, misc_imports, infra_imports])) - import_block += '\n' + FormatPublicImportBlock(public_imports) - comment_block = FormatComments(source_code_info.file_level_comments) - - return ''.join(map(FormatBlock, [file_block, import_block, options_block, comment_block])) - - -def NormalizeFieldTypeName(type_context, field_fqn): - """Normalize a fully qualified field type name, e.g. - - .envoy.foo.bar is normalized to foo.bar. - - Considers type context to minimize type prefix. - - Args: - field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. - type_context: contextual information for message/enum/field. - - Returns: - Normalized type name as a string. 
- """ - if field_fqn.startswith('.'): - # Let's say we have type context namespace a.b.c.d.e and the type we're - # trying to normalize is a.b.d.e. We take (from the end) on package fragment - # at a time, and apply the inner-most evaluation that protoc performs to see - # if we evaluate to the fully qualified type. If so, we're done. It's not - # sufficient to compute common prefix and drop that, since in the above - # example the normalized type name would be d.e, which proto resolves inner - # most as a.b.c.d.e (bad) instead of the intended a.b.d.e. - field_fqn_splits = field_fqn[1:].split('.') - type_context_splits = type_context.name.split('.')[:-1] - remaining_field_fqn_splits = deque(field_fqn_splits[:-1]) - normalized_splits = deque([field_fqn_splits[-1]]) - - def EquivalentInTypeContext(splits): - type_context_splits_tmp = deque(type_context_splits) - while type_context_splits_tmp: - # If we're in a.b.c and the FQN is a.d.Foo, we want to return true once - # we have type_context_splits_tmp as [a] and splits as [d, Foo]. - if list(type_context_splits_tmp) + list(splits) == field_fqn_splits: - return True - # If we're in a.b.c.d.e.f and the FQN is a.b.d.e.Foo, we want to return True - # once we have type_context_splits_tmp as [a] and splits as [b, d, e, Foo], but - # not when type_context_splits_tmp is [a, b, c] and FQN is [d, e, Foo]. - if len(splits) > 1 and '.'.join(type_context_splits_tmp).endswith('.'.join( - list(splits)[:-1])): - return False - type_context_splits_tmp.pop() - return False - - while remaining_field_fqn_splits and not EquivalentInTypeContext(normalized_splits): - normalized_splits.appendleft(remaining_field_fqn_splits.pop()) - - # `extensions` is a keyword in proto2, and protoc will throw error if a type name - # starts with `extensions.`. 
- if normalized_splits[0] == 'extensions': - normalized_splits.appendleft(remaining_field_fqn_splits.pop()) - - return '.'.join(normalized_splits) - return field_fqn - - -def TypeNameFromFQN(fqn): - return fqn[1:] - - -def FormatFieldType(type_context, field): - """Format a FieldDescriptorProto type description. - - Args: - type_context: contextual information for message/enum/field. - field: FieldDescriptor proto. - - Returns: - Formatted proto field type as string. - """ - label = 'repeated ' if field.label == field.LABEL_REPEATED else '' - type_name = label + NormalizeFieldTypeName(type_context, field.type_name) - - if field.type == field.TYPE_MESSAGE: - if type_context.map_typenames and TypeNameFromFQN( - field.type_name) in type_context.map_typenames: - return 'map<%s, %s>' % tuple( - map(functools.partial(FormatFieldType, type_context), - type_context.map_typenames[TypeNameFromFQN(field.type_name)])) - return type_name - elif field.type_name: - return type_name - - pretty_type_names = { - field.TYPE_DOUBLE: 'double', - field.TYPE_FLOAT: 'float', - field.TYPE_INT32: 'int32', - field.TYPE_SFIXED32: 'int32', - field.TYPE_SINT32: 'int32', - field.TYPE_FIXED32: 'uint32', - field.TYPE_UINT32: 'uint32', - field.TYPE_INT64: 'int64', - field.TYPE_SFIXED64: 'int64', - field.TYPE_SINT64: 'int64', - field.TYPE_FIXED64: 'uint64', - field.TYPE_UINT64: 'uint64', - field.TYPE_BOOL: 'bool', - field.TYPE_STRING: 'string', - field.TYPE_BYTES: 'bytes', - } - if field.type in pretty_type_names: - return label + pretty_type_names[field.type] - raise ProtoXformError('Unknown field type ' + str(field.type)) - - -def FormatServiceMethod(type_context, method): - """Format a service MethodDescriptorProto. - - Args: - type_context: contextual information for method. - method: MethodDescriptorProto proto. - - Returns: - Formatted service method as string. 
- """ - - def FormatStreaming(s): - return 'stream ' if s else '' - - leading_comment, trailing_comment = FormatTypeContextComments(type_context) - return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % ( - leading_comment, method.name, trailing_comment, FormatStreaming( - method.client_streaming), NormalizeFieldTypeName( - type_context, method.input_type), FormatStreaming(method.server_streaming), - NormalizeFieldTypeName(type_context, method.output_type), FormatOptions(method.options)) - - -def FormatField(type_context, field): - """Format FieldDescriptorProto as a proto field. - - Args: - type_context: contextual information for message/enum/field. - field: FieldDescriptor proto. - - Returns: - Formatted proto field as a string. - """ - if protoxform_options.HasHideOption(field.options): - return '' - leading_comment, trailing_comment = FormatTypeContextComments(type_context) - - return '%s%s %s = %d%s;\n%s' % (leading_comment, FormatFieldType(type_context, field), field.name, - field.number, FormatOptions(field.options), trailing_comment) - - -def FormatEnumValue(type_context, value): - """Format a EnumValueDescriptorProto as a proto enum value. - - Args: - type_context: contextual information for message/enum/field. - value: EnumValueDescriptorProto. - - Returns: - Formatted proto enum value as a string. 
- """ - if protoxform_options.HasHideOption(value.options): - return '' - leading_comment, trailing_comment = FormatTypeContextComments(type_context) - formatted_annotations = FormatOptions(value.options) - return '%s%s = %d%s;\n%s' % (leading_comment, value.name, value.number, formatted_annotations, - trailing_comment) - - -def TextFormatValue(field, value): - """Format the value as protobuf text format - - Args: - field: a FieldDescriptor that describes the field - value: the value stored in the field - - Returns: - value in protobuf text format - """ - out = io.StringIO() - text_format.PrintFieldValue(field, value, out) - return out.getvalue() - - -def FormatOptions(options): - """Format *Options (e.g. - - MessageOptions, FieldOptions) message. - - Args: - options: A *Options (e.g. MessageOptions, FieldOptions) message. - - Returns: - Formatted options as a string. - """ - - formatted_options = [] - for option_descriptor, option_value in sorted(options.ListFields(), key=lambda x: x[0].number): - option_name = '({})'.format( - option_descriptor.full_name) if option_descriptor.is_extension else option_descriptor.name - if option_descriptor.message_type and option_descriptor.label != option_descriptor.LABEL_REPEATED: - formatted_options.extend([ - '{}.{} = {}'.format(option_name, subfield.name, TextFormatValue(subfield, value)) - for subfield, value in option_value.ListFields() - ]) - else: - formatted_options.append('{} = {}'.format(option_name, - TextFormatValue(option_descriptor, option_value))) - - if formatted_options: - if options.DESCRIPTOR.name in ('EnumValueOptions', 'FieldOptions'): - return '[{}]'.format(','.join(formatted_options)) - else: - return FormatBlock(''.join( - 'option {};\n'.format(formatted_option) for formatted_option in formatted_options)) - return '' - - -def FormatReserved(enum_or_msg_proto): - """Format reserved values/names in a [Enum]DescriptorProto. - - Args: - enum_or_msg_proto: [Enum]DescriptorProto message. 
- - Returns: - Formatted enum_or_msg_proto as a string. - """ - reserved_fields = FormatBlock('reserved %s;\n' % ','.join( - map(str, sum([list(range(rr.start, rr.end)) for rr in enum_or_msg_proto.reserved_range], - [])))) if enum_or_msg_proto.reserved_range else '' - if enum_or_msg_proto.reserved_name: - reserved_fields += FormatBlock('reserved %s;\n' % - ', '.join('"%s"' % n for n in enum_or_msg_proto.reserved_name)) - return reserved_fields - - class ProtoFormatVisitor(visitor.Visitor): """Visitor to generate a proto representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ + def __init__(self, active_or_frozen, params): + if params['type_db_path']: + utils.LoadTypeDb(params['type_db_path']) + self._freeze = 'extra_args' in params and params['extra_args'] == 'freeze' + self._active_or_frozen = active_or_frozen + def VisitService(self, service_proto, type_context): - leading_comment, trailing_comment = FormatTypeContextComments(type_context) - methods = '\n'.join( - FormatServiceMethod(type_context.ExtendMethod(index, m.name), m) - for index, m in enumerate(service_proto.method)) - options = FormatBlock(FormatOptions(service_proto.options)) - return '%sservice %s {\n%s%s%s\n}\n' % (leading_comment, service_proto.name, options, - trailing_comment, methods) + return None def VisitEnum(self, enum_proto, type_context): - leading_comment, trailing_comment = FormatTypeContextComments(type_context) - formatted_options = FormatOptions(enum_proto.options) - reserved_fields = FormatReserved(enum_proto) - values = [ - FormatEnumValue(type_context.ExtendField(index, value.name), value) - for index, value in enumerate(enum_proto.value) - ] - joined_values = ('\n' if any('//' in v for v in values) else '').join(values) - return '%senum %s {\n%s%s%s%s\n}\n' % (leading_comment, enum_proto.name, trailing_comment, - formatted_options, reserved_fields, joined_values) + return None def VisitMessage(self, msg_proto, type_context, 
nested_msgs, nested_enums): - # Skip messages synthesized to represent map types. - if msg_proto.options.map_entry: - return '' - if protoxform_options.HasHideOption(msg_proto.options): - return '' - annotation_xforms = { - annotations.NEXT_FREE_FIELD_ANNOTATION: CreateNextFreeFieldXform(msg_proto) - } - leading_comment, trailing_comment = FormatTypeContextComments(type_context, annotation_xforms) - formatted_options = FormatOptions(msg_proto.options) - formatted_enums = FormatBlock('\n'.join(nested_enums)) - formatted_msgs = FormatBlock('\n'.join(nested_msgs)) - reserved_fields = FormatReserved(msg_proto) - # Recover the oneof structure. This needs some extra work, since - # DescriptorProto just gives use fields and a oneof_index that can allow - # recovery of the original oneof placement. - fields = '' - oneof_index = None - for index, field in enumerate(msg_proto.field): - if oneof_index is not None: - if not field.HasField('oneof_index') or field.oneof_index != oneof_index: - fields += '}\n\n' - oneof_index = None - if oneof_index is None and field.HasField('oneof_index'): - oneof_index = field.oneof_index - oneof_proto = msg_proto.oneof_decl[oneof_index] - oneof_leading_comment, oneof_trailing_comment = FormatTypeContextComments( - type_context.ExtendOneof(oneof_index, field.name)) - fields += '%soneof %s {\n%s%s' % (oneof_leading_comment, oneof_proto.name, - oneof_trailing_comment, FormatOptions( - oneof_proto.options)) - fields += FormatBlock(FormatField(type_context.ExtendField(index, field.name), field)) - if oneof_index is not None: - fields += '}\n\n' - return '%smessage %s {\n%s%s%s%s%s%s\n}\n' % (leading_comment, msg_proto.name, trailing_comment, - formatted_options, formatted_enums, - formatted_msgs, reserved_fields, fields) + return None def VisitFile(self, file_proto, type_context, services, msgs, enums): - header = FormatHeaderFromFile(type_context.source_code_info, file_proto) - formatted_services = FormatBlock('\n'.join(services)) - 
formatted_enums = FormatBlock('\n'.join(enums)) - formatted_msgs = FormatBlock('\n'.join(msgs)) - return ClangFormat(header + formatted_services + formatted_enums + formatted_msgs) - - -def ParameterCallback(parameter): - params = dict(param.split('=') for param in parameter.split(',')) - if params['type_db_path']: - utils.LoadTypeDb(params['type_db_path']) + # Freeze protos that have next major version candidates. + typedb = utils.GetTypeDb() + output_proto = copy.deepcopy(file_proto) + existing_pkg_version_status = output_proto.options.Extensions[ + status_pb2.file_status].package_version_status + empty_file = len(services) == 0 and len(enums) == 0 and len(msgs) == 0 + pkg_version_status_exempt = file_proto.name.startswith('envoy/annotations') or empty_file + # It's a format error not to set package_version_status. + if existing_pkg_version_status == status_pb2.UNKNOWN and not pkg_version_status_exempt: + raise ProtoXformError('package_version_status must be set in %s' % file_proto.name) + # Only update package_version_status for .active_or_frozen.proto, + # migrate.VersionUpgradeXform has taken care of next major version + # candidates. + if self._active_or_frozen and not pkg_version_status_exempt: + # Freeze if this is an active package with a next major version. Preserve + # frozen status otherwise. 
+ if self._freeze and typedb.next_version_protos.get(output_proto.name, None): + target_pkg_version_status = status_pb2.FROZEN + elif existing_pkg_version_status == status_pb2.FROZEN: + target_pkg_version_status = status_pb2.FROZEN + else: + assert (existing_pkg_version_status == status_pb2.ACTIVE) + target_pkg_version_status = status_pb2.ACTIVE + output_proto.options.Extensions[ + status_pb2.file_status].package_version_status = target_pkg_version_status + return str(output_proto) def Main(): plugin.Plugin([ - plugin.DirectOutputDescriptor('.v2.proto', ProtoFormatVisitor), - plugin.OutputDescriptor('.v3.proto', ProtoFormatVisitor, - functools.partial(migrate.V3MigrationXform, False)), - plugin.OutputDescriptor('.v3.envoy_internal.proto', ProtoFormatVisitor, - functools.partial(migrate.V3MigrationXform, True)) - ], ParameterCallback) + plugin.DirectOutputDescriptor('.active_or_frozen.proto', + functools.partial(ProtoFormatVisitor, True), + want_params=True), + plugin.OutputDescriptor('.next_major_version_candidate.proto', + functools.partial(ProtoFormatVisitor, False), + functools.partial(migrate.VersionUpgradeXform, 2, False), + want_params=True), + plugin.OutputDescriptor('.next_major_version_candidate.envoy_internal.proto', + functools.partial(ProtoFormatVisitor, False), + functools.partial(migrate.VersionUpgradeXform, 2, True), + want_params=True) + ]) if __name__ == '__main__': diff --git a/tools/protoxform/protoxform_test.sh b/tools/protoxform/protoxform_test.sh index b4661db9c3a9..3fe0a5319757 100755 --- a/tools/protoxform/protoxform_test.sh +++ b/tools/protoxform/protoxform_test.sh @@ -1,17 +1,26 @@ #!/bin/bash -rm -rf bazel-bin/tools +set -e -declare -r PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:protos))") +rm -rf bazel-bin/tools BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all" -bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:protos \ - 
//tools/testdata/protoxform:protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto \ - --action_env=CPROFILE_ENABLED=1 --host_force_python=PY3 - TOOLS=$(dirname $(dirname $(realpath $0))) # to satisfy dependency on run_command export PYTHONPATH="$TOOLS" -./tools/protoxform/protoxform_test_helper.py ${PROTO_TARGETS} +# protoxform fix test cases +PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:fix_protos))") +bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:fix_protos \ + //tools/testdata/protoxform:fix_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto +bazel build ${BAZEL_BUILD_OPTIONS} //tools/protoxform:protoprint +./tools/protoxform/protoxform_test_helper.py fix ${PROTO_TARGETS} + +# protoxform freeze test cases +PROTO_TARGETS=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:freeze_protos))") +bazel build ${BAZEL_BUILD_OPTIONS} --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:freeze_protos \ + --//tools/api_proto_plugin:extra_args=freeze \ + //tools/testdata/protoxform:freeze_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto +bazel build ${BAZEL_BUILD_OPTIONS} //tools/protoxform:protoprint +./tools/protoxform/protoxform_test_helper.py freeze ${PROTO_TARGETS} diff --git a/tools/protoxform/protoxform_test_helper.py b/tools/protoxform/protoxform_test_helper.py index 0daeb25d1de7..dfa4c3ab7813 100755 --- a/tools/protoxform/protoxform_test_helper.py +++ b/tools/protoxform/protoxform_test_helper.py @@ -4,8 +4,11 @@ import logging import os +import pathlib import re +import subprocess import sys +import tempfile def PathAndFilename(label): @@ -37,18 +40,34 @@ def GoldenProtoFile(path, filename, version): version: api version to specify target golden proto filename Returns: - actual golden proto absolute path 
+ actual golden proto absolute path """ base = "./" base += path + "/" + filename + "." + version + ".gold" return os.path.abspath(base) -def ResultProtoFile(path, filename, version): +def ProtoPrint(src, dst): + """Pretty-print FileDescriptorProto to a destination file. + + Args: + src: source path for FileDescriptorProto. + dst: destination path for formatted proto. + """ + print('ProtoPrint %s -> %s' % (src, dst)) + subprocess.check_call([ + 'bazel-bin/tools/protoxform/protoprint', src, dst, + './bazel-bin/tools/protoxform/protoprint.runfiles/envoy/tools/type_whisperer/api_type_db.pb_text' + ]) + + +def ResultProtoFile(cmd, path, tmp, filename, version): """Retrieve result proto file path. In general, those are placed in bazel artifacts. Args: + cmd: fix or freeze? path: target proto path + tmp: temporary directory. filename: target proto filename version: api version to specify target result proto filename @@ -56,10 +75,12 @@ def ResultProtoFile(path, filename, version): actual result proto absolute path """ base = "./bazel-bin" - base += os.path.join(path, "protos") + base += os.path.join(path, "%s_protos" % cmd) base += os.path.join(base, path) base += "/{0}.{1}.proto".format(filename, version) - return os.path.abspath(base) + dst = os.path.join(tmp, filename) + ProtoPrint(os.path.abspath(base), dst) + return dst def Diff(result_file, golden_file): @@ -79,10 +100,11 @@ def Diff(result_file, golden_file): return [status, stdout, stderr] -def Run(path, filename, version): +def Run(cmd, path, filename, version): """Run main execution for protoxform test Args: + cmd: fix or freeze? 
path: target proto path filename: target proto filename version: api version to specify target result proto filename @@ -91,26 +113,30 @@ def Run(path, filename, version): result message extracted from diff command """ message = "" - golden_path = GoldenProtoFile(path, filename, version) - test_path = ResultProtoFile(path, filename, version) + with tempfile.TemporaryDirectory() as tmp: + golden_path = GoldenProtoFile(path, filename, version) + test_path = ResultProtoFile(cmd, path, tmp, filename, version) + if os.stat(golden_path).st_size == 0 and not os.path.exists(test_path): + return message - status, stdout, stderr = Diff(test_path, golden_path) + status, stdout, stderr = Diff(golden_path, test_path) - if status != 0: - message = '\n'.join([str(line) for line in stdout + stderr]) + if status != 0: + message = '\n'.join([str(line) for line in stdout + stderr]) - return message + return message if __name__ == "__main__": messages = "" logging.basicConfig(format='%(message)s') - for target in sys.argv[1:]: + cmd = sys.argv[1] + for target in sys.argv[2:]: path, filename = PathAndFilename(target) - messages += Run(path, filename, 'v2') - messages += Run(path, filename, 'v3') - messages += Run(path, filename, 'v3') - messages += Run(path, filename, 'v3.envoy_internal') + messages += Run(cmd, path, filename, 'active_or_frozen') + messages += Run(cmd, path, filename, 'next_major_version_candidate') + messages += Run(cmd, path, filename, 'next_major_version_candidate') + messages += Run(cmd, path, filename, 'next_major_version_candidate.envoy_internal') if len(messages) == 0: logging.warning("PASS") diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 4990223faaec..7cba2f0a565d 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -321,6 +321,7 @@ VH VHDS VLOG VM +WAITFORONE WASM WAVM WIP @@ -353,6 +354,7 @@ alignof alloc alloca allocator +allowlist alls alphanumerics amongst @@ 
-527,6 +529,7 @@ dirname djb downcasted downstreams +drainable dtor dubbo dup @@ -624,6 +627,7 @@ hermeticity highp hoc hostname +hostnames hostset hrefs huffman @@ -636,6 +640,7 @@ ified impl implementors impls +indices inflater inflight -ing @@ -730,6 +735,8 @@ misconfigured mixin mkdir mmap +mmsg +mmsghdr mongo moveable msec @@ -751,6 +758,7 @@ namespaces namespacing nan natively +ndk netblock netblocks netfilter @@ -941,6 +949,7 @@ sched schemas scopekey sendmsg +sendmmsg sendto serializable serializer diff --git a/tools/testdata/check_format/counter_from_string.cc b/tools/testdata/check_format/counter_from_string.cc index 8c89250fefe9..07c1cdad54ef 100644 --- a/tools/testdata/check_format/counter_from_string.cc +++ b/tools/testdata/check_format/counter_from_string.cc @@ -1,7 +1,7 @@ namespace Envoy { void init(Stats::Scope& scope) { - scope.counter("hello"); + scope.counterFromString("hello"); } } // namespace Envoy diff --git a/tools/testdata/check_format/gauge_from_string.cc b/tools/testdata/check_format/gauge_from_string.cc index 06dbd01d2ea3..b23bf66e6273 100644 --- a/tools/testdata/check_format/gauge_from_string.cc +++ b/tools/testdata/check_format/gauge_from_string.cc @@ -1,7 +1,7 @@ namespace Envoy { void init(Stats::Scope& scope) { - scope.gauge("hello"); + scope.gaugeFromString("hello"); } } // namespace Envoy diff --git a/tools/testdata/check_format/histogram_from_string.cc b/tools/testdata/check_format/histogram_from_string.cc index e7668d3eeada..e9bf611ddfaa 100644 --- a/tools/testdata/check_format/histogram_from_string.cc +++ b/tools/testdata/check_format/histogram_from_string.cc @@ -1,5 +1,7 @@ namespace Envoy { -void init(Stats::Scope& scope) { scope.histogram("hello", Stats::Histogram::Unit::Unspecified); } +void init(Stats::Scope& scope) { + scope.histogramFromString("hello", Stats::Histogram::Unit::Unspecified); +} } // namespace Envoy diff --git a/tools/testdata/protoxform/BUILD b/tools/testdata/protoxform/BUILD index 
2ffec2e74aef..382cffec50e4 100644 --- a/tools/testdata/protoxform/BUILD +++ b/tools/testdata/protoxform/BUILD @@ -1,9 +1,20 @@ licenses(["notice"]) # Apache 2 proto_library( - name = "protos", + name = "fix_protos", visibility = ["//visibility:public"], deps = [ - "//tools/testdata/protoxform/envoy/v2:protos", + "//tools/testdata/protoxform/envoy/v2:fix_protos", + ], +) + +proto_library( + name = "freeze_protos", + visibility = ["//visibility:public"], + deps = [ + "//tools/testdata/protoxform/envoy/active_non_terminal/v2:freeze_protos", + "//tools/testdata/protoxform/envoy/active_terminal/v2:freeze_protos", + "//tools/testdata/protoxform/envoy/frozen/v2:freeze_protos", + "//tools/testdata/protoxform/envoy/frozen/v3:freeze_protos", ], ) diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD new file mode 100644 index 000000000000..4c756ea94137 --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD @@ -0,0 +1,8 @@ +licenses(["notice"]) # Apache 2 + +proto_library( + name = "freeze_protos", + srcs = ["active_non_terminal.proto"], + visibility = ["//visibility:public"], + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto new file mode 100644 index 000000000000..0e21d4700c62 --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package envoy.active_non_terminal.v2; + +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message ActiveNonTerminal { + int32 foo = 1 [deprecated = true]; + int32 bar = 2; +} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold 
b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold new file mode 100644 index 000000000000..859456d5bb8d --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package envoy.active_non_terminal.v2; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.active_non_terminal.v2"; +option java_outer_classname = "ActiveNonTerminalProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +message ActiveNonTerminal { + int32 foo = 1 [deprecated = true]; + + int32 bar = 2; +} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold new file mode 100644 index 000000000000..e351dc4331e1 --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.active_non_terminal.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.active_non_terminal.v3"; +option java_outer_classname = "ActiveNonTerminalProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message ActiveNonTerminal { + option (udpa.annotations.versioning).previous_message_type = + "envoy.active_non_terminal.v2.ActiveNonTerminal"; + + int32 hidden_envoy_deprecated_foo = 1 [deprecated = true]; + + int32 bar = 2; +} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold 
b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold new file mode 100644 index 000000000000..5d369aefd96d --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package envoy.active_non_terminal.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.active_non_terminal.v3"; +option java_outer_classname = "ActiveNonTerminalProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message ActiveNonTerminal { + option (udpa.annotations.versioning).previous_message_type = + "envoy.active_non_terminal.v2.ActiveNonTerminal"; + + reserved 1; + + reserved "foo"; + + int32 bar = 2; +} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD new file mode 100644 index 000000000000..d97319b2631a --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD @@ -0,0 +1,8 @@ +licenses(["notice"]) # Apache 2 + +proto_library( + name = "freeze_protos", + srcs = ["active_terminal.proto"], + visibility = ["//visibility:public"], + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto new file mode 100644 index 000000000000..1c5bdaca3683 --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package envoy.active_terminal.v2; + +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message ActiveTerminal { + int32 foo = 1; +} diff --git 
a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold new file mode 100644 index 000000000000..5e49be1e63b4 --- /dev/null +++ b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package envoy.active_terminal.v2; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.active_terminal.v2"; +option java_outer_classname = "ActiveTerminalProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message ActiveTerminal { + int32 foo = 1; +} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/testdata/protoxform/envoy/frozen/v2/BUILD b/tools/testdata/protoxform/envoy/frozen/v2/BUILD new file mode 100644 index 000000000000..bbbcaffdbc75 --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v2/BUILD @@ -0,0 +1,8 @@ +licenses(["notice"]) # Apache 2 + +proto_library( + name = "freeze_protos", + srcs = ["frozen.proto"], + visibility = ["//visibility:public"], + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto new file mode 100644 index 000000000000..defe7ff3eac4 --- /dev/null 
+++ b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package envoy.frozen.v2; + +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = FROZEN; + +message Frozen { + int32 foo = 1; + int32 bar = 2 [deprecated = true]; +} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold new file mode 100644 index 000000000000..5086376ee435 --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package envoy.frozen.v2; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.frozen.v2"; +option java_outer_classname = "FrozenProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +message Frozen { + int32 foo = 1; + + int32 bar = 2 [deprecated = true]; +} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold new file mode 100644 index 000000000000..fa0b405ec586 --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.frozen.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.frozen.v3"; +option java_outer_classname = "FrozenProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +message Frozen { + option (udpa.annotations.versioning).previous_message_type = "envoy.frozen.v2.Frozen"; + + int32 foo = 1; + + int32 hidden_envoy_deprecated_bar = 2 
[deprecated = true]; +} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold new file mode 100644 index 000000000000..7c10c1313b27 --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.frozen.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.frozen.v3"; +option java_outer_classname = "FrozenProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +message Frozen { + option (udpa.annotations.versioning).previous_message_type = "envoy.frozen.v2.Frozen"; + + reserved 2; + + reserved "bar"; + + int32 foo = 1; +} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/BUILD b/tools/testdata/protoxform/envoy/frozen/v3/BUILD new file mode 100644 index 000000000000..bbbcaffdbc75 --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v3/BUILD @@ -0,0 +1,8 @@ +licenses(["notice"]) # Apache 2 + +proto_library( + name = "freeze_protos", + srcs = ["frozen.proto"], + visibility = ["//visibility:public"], + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto new file mode 100644 index 000000000000..0e09acf92fe6 --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package envoy.frozen.v3; + +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message Frozen { + int32 foo = 1; + reserved 2; +} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold 
b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold new file mode 100644 index 000000000000..23740e54e11f --- /dev/null +++ b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package envoy.frozen.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.frozen.v3"; +option java_outer_classname = "FrozenProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +message Frozen { + reserved 2; + + int32 foo = 1; +} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/testdata/protoxform/envoy/v2/BUILD b/tools/testdata/protoxform/envoy/v2/BUILD index 61eeb69ded13..f381f26cfa5f 100644 --- a/tools/testdata/protoxform/envoy/v2/BUILD +++ b/tools/testdata/protoxform/envoy/v2/BUILD @@ -1,7 +1,7 @@ licenses(["notice"]) # Apache 2 proto_library( - name = "protos", + name = "fix_protos", srcs = [ "discovery_service.proto", "oneof.proto", @@ -15,3 +15,18 @@ proto_library( "@envoy_api//envoy/api/v2:pkg", ], ) + +proto_library( + name = "freeze_protos", + srcs = [ + "active_non_terminal.proto", + "active_terminal.proto", + "frozen.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@envoy_api//envoy/annotations:pkg", + "@envoy_api//envoy/api/v2:pkg", + ], +) diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto 
b/tools/testdata/protoxform/envoy/v2/discovery_service.proto index 1a4530176ff3..921f27133dfb 100644 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto +++ b/tools/testdata/protoxform/envoy/v2/discovery_service.proto @@ -8,6 +8,9 @@ import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = ACTIVE; service SomeDiscoveryService { option (envoy.annotations.resource).type = "envoy.v2.SomeResource"; diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.v2.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.active_or_frozen.gold similarity index 86% rename from tools/testdata/protoxform/envoy/v2/discovery_service.proto.v2.gold rename to tools/testdata/protoxform/envoy/v2/discovery_service.proto.active_or_frozen.gold index e2729b8c1db5..40d597ad8cd2 100644 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.v2.gold +++ b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.active_or_frozen.gold @@ -2,17 +2,19 @@ syntax = "proto3"; package envoy.v2; -import ""; +import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.v2"; option java_outer_classname = "DiscoveryServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; service SomeDiscoveryService { option (envoy.annotations.resource).type = "envoy.v2.SomeResource"; diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.v3.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold similarity index 85% rename from 
tools/testdata/protoxform/envoy/v2/discovery_service.proto.v3.envoy_internal.gold rename to tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold index c44b0635b42c..cd6b36941d92 100644 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.v3.envoy_internal.gold +++ b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold @@ -2,18 +2,19 @@ syntax = "proto3"; package envoy.v3; -import ""; +import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.v3"; option java_outer_classname = "DiscoveryServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; service SomeDiscoveryService { option (envoy.annotations.resource).type = "envoy.v3.SomeResource"; diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.v3.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold similarity index 85% rename from tools/testdata/protoxform/envoy/v2/discovery_service.proto.v3.gold rename to tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold index c44b0635b42c..cd6b36941d92 100644 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.v3.gold +++ b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold @@ -2,18 +2,19 @@ syntax = "proto3"; package envoy.v3; -import ""; +import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; -import "udpa/annotations/versioning.proto"; - import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; 
+import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.v3"; option java_outer_classname = "DiscoveryServiceProto"; option java_multiple_files = true; option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; service SomeDiscoveryService { option (envoy.annotations.resource).type = "envoy.v3.SomeResource"; diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto b/tools/testdata/protoxform/envoy/v2/oneof.proto index 07a3f7bbfae0..8630b23e37b5 100644 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto +++ b/tools/testdata/protoxform/envoy/v2/oneof.proto @@ -3,6 +3,9 @@ syntax = "proto3"; package envoy.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = ACTIVE; message OneofExample { string foo = 1 [(udpa.annotations.field_migrate).oneof_promotion = "baz_specifier"]; diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.v2.gold b/tools/testdata/protoxform/envoy/v2/oneof.proto.active_or_frozen.gold similarity index 80% rename from tools/testdata/protoxform/envoy/v2/oneof.proto.v2.gold rename to tools/testdata/protoxform/envoy/v2/oneof.proto.active_or_frozen.gold index 242f984cc888..19d79b993af8 100644 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.v2.gold +++ b/tools/testdata/protoxform/envoy/v2/oneof.proto.active_or_frozen.gold @@ -3,10 +3,12 @@ syntax = "proto3"; package envoy.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.v2"; option java_outer_classname = "OneofProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; message OneofExample { string foo = 1 [(udpa.annotations.field_migrate).oneof_promotion = "baz_specifier"]; diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.v3.gold 
b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold similarity index 76% rename from tools/testdata/protoxform/envoy/v2/oneof.proto.v3.gold rename to tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold index 68bb86e12bd0..acd2fee1a962 100644 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.v3.gold +++ b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.v3"; option java_outer_classname = "OneofProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; message OneofExample { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.OneofExample"; diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.v3.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold similarity index 76% rename from tools/testdata/protoxform/envoy/v2/oneof.proto.v3.envoy_internal.gold rename to tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold index 68bb86e12bd0..acd2fee1a962 100644 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.v3.envoy_internal.gold +++ b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.v3"; option java_outer_classname = "OneofProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; message OneofExample { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.OneofExample"; 
diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto b/tools/testdata/protoxform/envoy/v2/package_move.proto index 3356dd60152e..0bf083bb58c8 100644 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto +++ b/tools/testdata/protoxform/envoy/v2/package_move.proto @@ -3,8 +3,10 @@ syntax = "proto3"; package envoy.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option (udpa.annotations.file_migrate).move_to_package = "envoy.foo.v3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; message Package { message Entry { diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto.v2.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.active_or_frozen.gold similarity index 78% rename from tools/testdata/protoxform/envoy/v2/package_move.proto.v2.gold rename to tools/testdata/protoxform/envoy/v2/package_move.proto.active_or_frozen.gold index d83a425c2f21..a8e7a9fce768 100644 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto.v2.gold +++ b/tools/testdata/protoxform/envoy/v2/package_move.proto.active_or_frozen.gold @@ -3,11 +3,13 @@ syntax = "proto3"; package envoy.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.v2"; option java_outer_classname = "PackageMoveProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.foo.v3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; message Package { message Entry { diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto.v3.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold similarity index 79% rename from tools/testdata/protoxform/envoy/v2/package_move.proto.v3.envoy_internal.gold rename to tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold index 
b81658afaaa9..e7aaa8085a3e 100644 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto.v3.envoy_internal.gold +++ b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.foo.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.foo.v3"; option java_outer_classname = "PackageMoveProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; message Package { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package"; diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto.v3.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold similarity index 79% rename from tools/testdata/protoxform/envoy/v2/package_move.proto.v3.gold rename to tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold index b81658afaaa9..e7aaa8085a3e 100644 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto.v3.gold +++ b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.foo.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.foo.v3"; option java_outer_classname = "PackageMoveProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; message Package { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package"; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto b/tools/testdata/protoxform/envoy/v2/sample.proto index 1ab0052851eb..73649fb32db0 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto +++ b/tools/testdata/protoxform/envoy/v2/sample.proto @@ 
-3,6 +3,9 @@ syntax = "proto3"; package envoy.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).package_version_status = ACTIVE; enum SomeEnum { DEFAULT = 0 [deprecated = true]; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.v2.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold similarity index 85% rename from tools/testdata/protoxform/envoy/v2/sample.proto.v2.gold rename to tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold index acfaa40a7038..577b8ddcc1f2 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.v2.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold @@ -3,10 +3,12 @@ syntax = "proto3"; package envoy.v2; import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.v2"; option java_outer_classname = "SampleProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; enum SomeEnum { DEFAULT = 0 [deprecated = true]; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.v3.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold similarity index 85% rename from tools/testdata/protoxform/envoy/v2/sample.proto.v3.envoy_internal.gold rename to tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold index 8ab2c509df41..3f10d5e043c4 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.v3.envoy_internal.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.v3"; option java_outer_classname = "SampleProto"; option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; enum SomeEnum { hidden_envoy_deprecated_DEFAULT = 0 [deprecated = true]; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.v3.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold similarity index 84% rename from tools/testdata/protoxform/envoy/v2/sample.proto.v3.gold rename to tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold index 5a48febf7f41..61ceb6463d14 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.v3.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold @@ -2,11 +2,13 @@ syntax = "proto3"; package envoy.v3; +import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.v3"; option java_outer_classname = "SampleProto"; option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; enum SomeEnum { reserved 2; diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index 4f586cb56e59..3acb95c8adbe 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -16,7 +16,6 @@ envoy_proto_library( py_binary( name = "type_whisperer", srcs = ["type_whisperer.py"], - python_version = "PY3", visibility = ["//visibility:public"], deps = [ ":types_py_proto", @@ -29,7 +28,6 @@ py_binary( py_binary( name = "typedb_gen", srcs = ["typedb_gen.py"], - python_version = "PY3", visibility = ["//visibility:public"], deps = [ ":api_type_db_proto_py_proto", @@ -57,8 +55,7 @@ py_binary( label_flag( name = "api_type_db_target", - # TODO(htuch): break dependence of API type DB on docs target. 
- build_setting_default = "@envoy_api_canonical//docs:protos", + build_setting_default = "@envoy_api_canonical//versioning:active_protos", visibility = ["//visibility:public"], ) diff --git a/tools/type_whisperer/proto_build_targets_gen.py b/tools/type_whisperer/proto_build_targets_gen.py index 22df422666ab..c1fcda72ee3d 100644 --- a/tools/type_whisperer/proto_build_targets_gen.py +++ b/tools/type_whisperer/proto_build_targets_gen.py @@ -1,7 +1,9 @@ # Generate api/BUILD based on API type database. This contains target for v2, v3 -# and all API protos. +# and all API protos. This is not the ideal way to be generating docs, see +# https://github.com/envoyproxy/envoy/issues/10311#issuecomment-603518498. import os +import re import string import sys @@ -9,6 +11,18 @@ from google.protobuf import text_format +V2_REGEXES = list( + map(re.compile, [ + r'envoy[\w\.]*\.(v1alpha\d?|v1)', + r'envoy[\w\.]*\.(v2alpha\d?|v2)', + r'envoy\.type\.matcher', + r'envoy\.type', + r'envoy\.config\.cluster\.redis', + r'envoy\.config\.retry\.previous_priorities', + ])) + +V3_REGEX = re.compile(r'envoy[\w\.]*\.(v3alpha|v3)') + API_BUILD_FILE_TEMPLATE = string.Template( """# DO NOT EDIT. This file is generated by tools/proto_sync.py. @@ -69,25 +83,33 @@ def DepsFormat(pkgs): for p in sorted(FilterPkgs(pkgs), key=BuildOrderKey)) +def IsV2Package(pkg): + for regex in V2_REGEXES: + if regex.match(pkg): + return True + return False + + +def IsV3Package(pkg): + return V3_REGEX.match(pkg) is not None + + if __name__ == '__main__': type_db_path, output_path = sys.argv[1:] type_db = LoadTypeDb(type_db_path) # TODO(htuch): generalize to > 2 versions v2_packages = set([]) v3_packages = set([]) - # Phase 1, we use the upgraded packages as a means to figure out which - # packages are explicitly in v2 vs. v3 by virtue of some breaking change. 
- for name, desc in type_db.types.items(): - if desc.next_version_type_name: - v2_packages.add(desc.qualified_package) - v3_packages.add(type_db.types[desc.next_version_type_name].qualified_package) - # Phase 2, we collect the rest of the packages not already binned and consider - # them to be both v2 and v3. - for name, desc in type_db.types.items(): + for desc in type_db.types.values(): pkg = desc.qualified_package - if pkg not in v2_packages and pkg not in v3_packages: - v2_packages.add(pkg) + if IsV3Package(pkg): v3_packages.add(pkg) + continue + if IsV2Package(pkg): + v2_packages.add(pkg) + # Special case for v2 packages that are part of v3 (still active) + if not desc.next_version_type_name: + v3_packages.add(pkg) # Generate BUILD file. build_file_contents = API_BUILD_FILE_TEMPLATE.substitute(v2_deps=DepsFormat(v2_packages), v3_deps=DepsFormat(v3_packages)) diff --git a/tools/type_whisperer/type_whisperer.py b/tools/type_whisperer/type_whisperer.py index a185d58eb8b8..611574d705bd 100755 --- a/tools/type_whisperer/type_whisperer.py +++ b/tools/type_whisperer/type_whisperer.py @@ -7,6 +7,7 @@ from tools.type_whisperer.types_pb2 import Types from udpa.annotations import migrate_pb2 +from udpa.annotations import status_pb2 class TypeWhispererVisitor(visitor.Visitor): @@ -25,9 +26,12 @@ def VisitService(self, service_proto, type_context): def VisitEnum(self, enum_proto, type_context): type_desc = self._types.types[type_context.name] type_desc.next_version_upgrade = any(v.options.deprecated for v in enum_proto.value) + type_desc.deprecated_type = type_context.deprecated def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): type_desc = self._types.types[type_context.name] + type_desc.map_entry = msg_proto.options.map_entry + type_desc.deprecated_type = type_context.deprecated type_deps = set([]) for f in msg_proto.field: if f.type_name.startswith('.'): @@ -37,12 +41,14 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, 
nested_enums): type_desc.type_dependencies.extend(type_deps) def VisitFile(self, file_proto, type_context, services, msgs, enums): - next_version_package = "" + next_version_package = '' if file_proto.options.HasExtension(migrate_pb2.file_migrate): next_version_package = file_proto.options.Extensions[migrate_pb2.file_migrate].move_to_package for t in self._types.types.values(): t.qualified_package = file_proto.package t.proto_path = file_proto.name + t.active = file_proto.options.Extensions[ + status_pb2.file_status].package_version_status == status_pb2.ACTIVE if next_version_package: t.next_version_package = next_version_package t.next_version_upgrade = True diff --git a/tools/type_whisperer/typedb_gen.py b/tools/type_whisperer/typedb_gen.py index c530e47e7ee1..660de09049a6 100644 --- a/tools/type_whisperer/typedb_gen.py +++ b/tools/type_whisperer/typedb_gen.py @@ -13,6 +13,7 @@ # Regexes governing v3upgrades. TODO(htuch): The regex approach will have # to be rethought as we go beyond v3, this is WiP. TYPE_UPGRADE_REGEXES = [ + (r'(envoy[\w\.]*\.)(v3alpha|v3)', r'\1v4alpha'), (r'(envoy[\w\.]*\.)(v1alpha\d?|v1)', r'\1v3'), (r'(envoy[\w\.]*\.)(v2alpha\d?|v2)', r'\1v3'), # These are special cases, e.g. upgrading versionless packages. @@ -70,6 +71,8 @@ def UpgradedTypeWithDescription(type_name, type_desc): upgrade_type_desc.qualified_package = UpgradedPackage(type_desc) upgrade_type_desc.proto_path = UpgradedPath(type_desc.proto_path, upgrade_type_desc.qualified_package) + upgrade_type_desc.deprecated_type = type_desc.deprecated_type + upgrade_type_desc.map_entry = type_desc.map_entry return (UpgradedType(type_name, type_desc), upgrade_type_desc) @@ -149,12 +152,16 @@ def NextVersionUpgrade(type_name, type_map, next_version_upgrade_memo, visited=N if NextVersionUpgrade(type_name, type_map, next_version_upgrade_memo) ]).union(set(['envoy.config.retry.previous_priorities', 'envoy.config.cluster.redis'])) - # Generate type map entries for upgraded types. 
- type_map.update([ - UpgradedTypeWithDescription(type_name, type_desc) - for type_name, type_desc in type_map.items() - if type_desc.qualified_package in next_versions_pkgs - ]) + # Generate type map entries for upgraded types. We run this twice to allow + # things like a v2 deprecated map field's synthesized map entry to forward + # propagate to v4alpha (for shadowing purposes). + for _ in range(2): + type_map.update([ + UpgradedTypeWithDescription(type_name, type_desc) + for type_name, type_desc in type_map.items() + if type_desc.qualified_package in next_versions_pkgs and + (type_desc.active or type_desc.deprecated_type or type_desc.map_entry) + ]) # Generate the type database proto. To provide some stability across runs, in # terms of the emitted proto binary blob that we track in git, we sort before diff --git a/tools/type_whisperer/types.proto b/tools/type_whisperer/types.proto index 493b5e363157..1f64931ef233 100644 --- a/tools/type_whisperer/types.proto +++ b/tools/type_whisperer/types.proto @@ -19,6 +19,15 @@ message TypeDescription { // The package of the type in next version string next_version_package = 6; + + // Is this a type in an active package? + bool active = 7; + + // Is this type a synthesized map entry? + bool map_entry = 8; + + // Is this type deprecated? 
+ bool deprecated_type = 9; } message Types { diff --git a/windows/.bazelrc b/windows/.bazelrc index c36d03627fd5..e48a2e1baa85 100644 --- a/windows/.bazelrc +++ b/windows/.bazelrc @@ -3,8 +3,7 @@ build --action_env=PATH build --action_env=TMPDIR -# see top-level .bazelrc for explanation -startup --host_jvm_args=-Xmx2g +build --experimental_local_memory_estimate build --define signal_trace=disabled build --define hot_restart=disabled @@ -23,7 +22,9 @@ build --define absl=1 # Should not be required after upstream fix, see issue; # https://github.com/bazelbuild/rules_foreign_cc/issues/301 +build --copt="-DCARES_STATICLIB" build --copt="-DNGHTTP2_STATICLIB" +build --copt="-DCURL_STATICLIB" # Required to work around quiche build defect # Unguarded gcc pragmas are not recognized by MSVC