From 5536ce73026fcbf1c176c09d409a981d67981047 Mon Sep 17 00:00:00 2001 From: Yaroslav Skopets Date: Fri, 8 Nov 2019 22:43:56 +0100 Subject: [PATCH 01/14] tcp_proxy: add support for subset load balancing (#8870) Add support for subset load balancing into tcp_proxy Risk Level: Medium Testing: unit tests, integration tests, manual testing Docs Changes: docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst Release Notes: docs/root/intro/version_history.rst Fixes #8769 This PR adds support for metadata_match field per weighted cluster, e.g. name: envoy.tcp_proxy typedConfig: '@type': type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy statPrefix: postgres.internal:5432 weighted_clusters: clusters: - name: postgres.internal:5432 metadata_match: # implemented by this PR filter_metadata: envoy.lb: role: master weight: 10 - name: postgres.internal:5432 metadata_match: # implemented by this PR filter_metadata: envoy.lb: role: replica weight: 90 Signed-off-by: Yaroslav Skopets --- .../network/tcp_proxy/v2/tcp_proxy.proto | 8 + .../network/tcp_proxy/v3alpha/tcp_proxy.proto | 8 + .../network_filters/tcp_proxy_filter.rst | 17 + docs/root/intro/version_history.rst | 1 + source/common/tcp_proxy/tcp_proxy.cc | 121 ++-- source/common/tcp_proxy/tcp_proxy.h | 85 ++- test/common/tcp_proxy/tcp_proxy_test.cc | 572 +++++++++++++++++- .../integration/tcp_proxy_integration_test.cc | 280 +++++++++ 8 files changed, 1003 insertions(+), 89 deletions(-) diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 889a9a0c9871..373d072bdbb8 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -90,6 +90,14 @@ message TcpProxy { // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what is set in this field will be considered + // for load balancing. Note that this will be merged with what's provided in + // :ref:`TcpProxy.metadata_match + // `, with values + // here taking precedence. The filter name should be specified as *envoy.lb*. + api.v2.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto index c8becda9d243..9ba8419dbd14 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto @@ -34,6 +34,14 @@ message TcpProxy { // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what is set in this field will be considered + // for load balancing. Note that this will be merged with what's provided in + // :ref:`TcpProxy.metadata_match + // `, with + // values here taking precedence. The filter name should be specified as *envoy.lb*. 
+ api.v3alpha.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. diff --git a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst index ab68c24acfef..5c68939cec89 100644 --- a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst @@ -17,6 +17,23 @@ other network filters on a per-connection basis by setting a per-connection state object under the key `envoy.tcp_proxy.cluster`. See the implementation for the details. +.. _config_network_filters_tcp_proxy_subset_lb: + +Routing to a subset of hosts +---------------------------- + +TCP proxy can be configured to route to a subset of hosts within an upstream cluster. + +To define metadata that a suitable upstream host must match, use one of the following fields: + +#. Use :ref:`TcpProxy.metadata_match` + to define required metadata for a single upstream cluster. +#. Use :ref:`ClusterWeight.metadata_match` + to define required metadata for a weighted upstream cluster. +#. Use combination of :ref:`TcpProxy.metadata_match` + and :ref:`ClusterWeight.metadata_match` + to define required metadata for a weighted upstream cluster (metadata from the latter will be merged on top of the former). + .. _config_network_filters_tcp_proxy_stats: Statistics diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index bb650cb96350..115b29f8c038 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -9,6 +9,7 @@ Version history * logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. * redis: performance improvement for larger split commands by avoiding string copies. * router: added support for REQ(header-name) :ref:`header formatter `. 
+* tcp_proxy: added :ref:`ClusterWeight.metadata_match` * tcp_proxy: added :ref:`hash_policy` * tls: remove TLS 1.0 and 1.1 from client defaults diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 430773b6003b..a80eaa4ebcea 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -31,8 +31,10 @@ const std::string& PerConnectionCluster::key() { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.tcp_proxy.cluster"); } -Config::Route::Route( - const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute& config) { +Config::RouteImpl::RouteImpl( + const Config& parent, + const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute& config) + : parent_(parent) { cluster_name_ = config.cluster(); source_ips_ = Network::Address::IpList(config.source_ip_list()); @@ -47,10 +49,48 @@ Config::Route::Route( } } +bool Config::RouteImpl::matches(Network::Connection& connection) const { + if (!source_port_ranges_.empty() && + !Network::Utility::portInRangeList(*connection.remoteAddress(), source_port_ranges_)) { + return false; + } + + if (!source_ips_.empty() && !source_ips_.contains(*connection.remoteAddress())) { + return false; + } + + if (!destination_port_ranges_.empty() && + !Network::Utility::portInRangeList(*connection.localAddress(), destination_port_ranges_)) { + return false; + } + + if (!destination_ips_.empty() && !destination_ips_.contains(*connection.localAddress())) { + return false; + } + + // if we made it past all checks, the route matches + return true; +} + Config::WeightedClusterEntry::WeightedClusterEntry( + const Config& parent, const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::WeightedCluster::ClusterWeight& config) - : cluster_name_(config.name()), cluster_weight_(config.weight()) {} + : parent_(parent), cluster_name_(config.name()), cluster_weight_(config.weight()) { + if (config.has_metadata_match()) { + const auto filter_it = config.metadata_match().filter_metadata().find( + Envoy::Config::MetadataFilters::get().ENVOY_LB); + if (filter_it != config.metadata_match().filter_metadata().end()) { + if (parent.cluster_metadata_match_criteria_) { + metadata_match_criteria_ = + parent.cluster_metadata_match_criteria_->mergeMatchCriteria(filter_it->second); + } else { + metadata_match_criteria_ = + std::make_unique(filter_it->second); + } + } + } +} Config::SharedConfig::SharedConfig( const envoy::config::filter::network::tcp_proxy::v2::TcpProxy& config, @@ -81,27 +121,14 @@ Config::Config(const envoy::config::filter::network::tcp_proxy::v2::TcpProxy& co if (config.has_deprecated_v1()) { for (const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute& route_desc : config.deprecated_v1().routes()) { - routes_.emplace_back(Route(route_desc)); + routes_.emplace_back(std::make_shared(*this, route_desc)); } } if (!config.cluster().empty()) { envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute default_route; default_route.set_cluster(config.cluster()); - routes_.emplace_back(default_route); - } - - // Weighted clusters will be enabled only if both the default cluster and - // deprecated v1 routes are absent. 
- if (routes_.empty() && config.has_weighted_clusters()) { - total_cluster_weight_ = 0; - for (const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::WeightedCluster:: - ClusterWeight& cluster_desc : config.weighted_clusters().clusters()) { - std::unique_ptr cluster_entry( - std::make_unique(cluster_desc)); - weighted_clusters_.emplace_back(std::move(cluster_entry)); - total_cluster_weight_ += weighted_clusters_.back()->clusterWeight(); - } + routes_.emplace_back(std::make_shared(*this, default_route)); } if (config.has_metadata_match()) { @@ -115,6 +142,19 @@ Config::Config(const envoy::config::filter::network::tcp_proxy::v2::TcpProxy& co } } + // Weighted clusters will be enabled only if both the default cluster and + // deprecated v1 routes are absent. + if (routes_.empty() && config.has_weighted_clusters()) { + total_cluster_weight_ = 0; + for (const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::WeightedCluster:: + ClusterWeight& cluster_desc : config.weighted_clusters().clusters()) { + WeightedClusterEntryConstSharedPtr cluster_entry( + std::make_shared(*this, cluster_desc)); + weighted_clusters_.emplace_back(std::move(cluster_entry)); + total_cluster_weight_ += weighted_clusters_.back()->clusterWeight(); + } + } + for (const envoy::config::filter::accesslog::v2::AccessLog& log_config : config.access_log()) { access_logs_.emplace_back(AccessLog::AccessLogFactory::fromProto(log_config, context)); } @@ -124,53 +164,36 @@ Config::Config(const envoy::config::filter::network::tcp_proxy::v2::TcpProxy& co } } -const std::string& Config::getRegularRouteFromEntries(Network::Connection& connection) { +RouteConstSharedPtr Config::getRegularRouteFromEntries(Network::Connection& connection) { // First check if the per-connection state to see if we need to route to a pre-selected cluster if (connection.streamInfo().filterState().hasData( PerConnectionCluster::key())) { const PerConnectionCluster& per_connection_cluster = connection.streamInfo().filterState().getDataReadOnly( PerConnectionCluster::key()); - return per_connection_cluster.value(); - } - for (const Config::Route& route : routes_) { - if (!route.source_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.remoteAddress(), - route.source_port_ranges_)) { - continue; - } - - if (!route.source_ips_.empty() && !route.source_ips_.contains(*connection.remoteAddress())) { - continue; - } - - if (!route.destination_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.localAddress(), - route.destination_port_ranges_)) { - continue; - } + envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute + per_connection_route; + per_connection_route.set_cluster(per_connection_cluster.value()); + return std::make_shared(*this, per_connection_route); + } - if (!route.destination_ips_.empty() && - !route.destination_ips_.contains(*connection.localAddress())) { - continue; + for (const RouteConstSharedPtr& route : routes_) { + if (route->matches(connection)) { + return route; } - - // if we made it past all checks, the route matches - return route.cluster_name_; } // no match, no more routes to try - return EMPTY_STRING; + return nullptr; } -const std::string& Config::getRouteFromEntries(Network::Connection& connection) { +RouteConstSharedPtr Config::getRouteFromEntries(Network::Connection& connection) { if (weighted_clusters_.empty()) { return getRegularRouteFromEntries(connection); } return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_, - 
random_generator_.random(), false) - ->clusterName(); + random_generator_.random(), false); } UpstreamDrainManager& Config::drainManager() { @@ -342,7 +365,9 @@ void Filter::UpstreamCallbacks::drain(Drainer& drainer) { Network::FilterStatus Filter::initializeUpstreamConnection() { ASSERT(upstream_conn_data_ == nullptr); - const std::string& cluster_name = getUpstreamCluster(); + route_ = pickRoute(); + + const std::string& cluster_name = route_ ? route_->clusterName() : EMPTY_STRING; Upstream::ThreadLocalCluster* thread_local_cluster = cluster_manager_.get(cluster_name); diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index e59fe80a9f17..a9a7e1d50a4b 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -58,6 +58,34 @@ struct TcpProxyStats { class Drainer; class UpstreamDrainManager; +/** + * Route is an individual resolved route for a connection. + */ +class Route { +public: + virtual ~Route() = default; + + /** + * Check whether this route matches a given connection. + * @param connection supplies the connection to test against. + * @return bool true if this route matches a given connection. + */ + virtual bool matches(Network::Connection& connection) const PURE; + + /** + * @return const std::string& the upstream cluster that owns the route. + */ + virtual const std::string& clusterName() const PURE; + + /** + * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when + * selecting an upstream host + */ + virtual const Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE; +}; + +using RouteConstSharedPtr = std::shared_ptr; + /** * Filter configuration. * @@ -97,11 +125,11 @@ class Config { * parameters of a downstream connection. * @param connection supplies the parameters of the downstream connection for * which the proxy needs to open the corresponding upstream. - * @return the cluster name to be used for the upstream connection. - * If no route applies, returns the empty string. + * @return the route to be used for the upstream connection. + * If no route applies, returns nullptr. 
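+   * The returned route also exposes the metadata match criteria, if any, that the subset load
+   * balancer should use when selecting an upstream host (an added descriptive note, not part of
+   * the original change).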
*/ - const std::string& getRouteFromEntries(Network::Connection& connection); - const std::string& getRegularRouteFromEntries(Network::Connection& connection); + RouteConstSharedPtr getRouteFromEntries(Network::Connection& connection); + RouteConstSharedPtr getRegularRouteFromEntries(Network::Connection& connection); const TcpProxyStats& stats() { return shared_config_->stats(); } const std::vector& accessLogs() { return access_logs_; } @@ -111,16 +139,25 @@ class Config { } UpstreamDrainManager& drainManager(); SharedConfigSharedPtr sharedConfig() { return shared_config_; } - const Router::MetadataMatchCriteria* metadataMatchCriteria() { + const Router::MetadataMatchCriteria* metadataMatchCriteria() const { return cluster_metadata_match_criteria_.get(); } const Network::HashPolicy* hashPolicy() { return hash_policy_.get(); } private: - struct Route { - Route(const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute& - config); + struct RouteImpl : public Route { + RouteImpl(const Config& parent, + const envoy::config::filter::network::tcp_proxy::v2::TcpProxy::DeprecatedV1::TCPRoute& + config); + + // Route + bool matches(Network::Connection& connection) const override; + const std::string& clusterName() const override { return cluster_name_; } + const Router::MetadataMatchCriteria* metadataMatchCriteria() const override { + return parent_.metadataMatchCriteria(); + } + const Config& parent_; Network::Address::IpList source_ips_; Network::PortRangeList source_port_ranges_; Network::Address::IpList destination_ips_; @@ -128,22 +165,34 @@ class Config { std::string cluster_name_; }; - class WeightedClusterEntry { + class WeightedClusterEntry : public Route { public: - WeightedClusterEntry(const envoy::config::filter::network::tcp_proxy::v2::TcpProxy:: + WeightedClusterEntry(const Config& parent, + const envoy::config::filter::network::tcp_proxy::v2::TcpProxy:: WeightedCluster::ClusterWeight& config); - const std::string& clusterName() const { return cluster_name_; } uint64_t clusterWeight() const { return cluster_weight_; } + // Route + bool matches(Network::Connection&) const override { return false; } + const std::string& clusterName() const override { return cluster_name_; } + const Router::MetadataMatchCriteria* metadataMatchCriteria() const override { + if (metadata_match_criteria_) { + return metadata_match_criteria_.get(); + } + return parent_.metadataMatchCriteria(); + } + private: + const Config& parent_; const std::string cluster_name_; const uint64_t cluster_weight_; + Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; }; - using WeightedClusterEntrySharedPtr = std::unique_ptr; + using WeightedClusterEntryConstSharedPtr = std::shared_ptr; - std::vector routes_; - std::vector weighted_clusters_; + std::vector routes_; + std::vector weighted_clusters_; uint64_t total_cluster_weight_; std::vector access_logs_; const uint32_t max_connect_attempts_; @@ -196,7 +245,10 @@ class Filter : public Network::ReadFilter, // Upstream::LoadBalancerContext const Router::MetadataMatchCriteria* metadataMatchCriteria() override { - return config_->metadataMatchCriteria(); + if (route_) { + return route_->metadataMatchCriteria(); + } + return nullptr; } // Upstream::LoadBalancerContext @@ -272,7 +324,7 @@ class Filter : public Network::ReadFilter, }; // Callbacks for different error and success states during connection establishment - virtual const std::string& getUpstreamCluster() { + virtual RouteConstSharedPtr pickRoute() { return 
config_->getRouteFromEntries(read_callbacks_->connection()); } @@ -300,6 +352,7 @@ class Filter : public Network::ReadFilter, std::shared_ptr upstream_callbacks_; // shared_ptr required for passing as a // read filter. StreamInfo::StreamInfoImpl stream_info_; + RouteConstSharedPtr route_; Network::TransportSocketOptionsSharedPtr transport_socket_options_; uint32_t connect_attempts_{}; bool connecting_{}; diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 3981c410ad84..523724f07961 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -191,7 +191,8 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { // hit route with destination_ip (10.10.10.10/32) NiceMock connection; connection.local_address_ = std::make_shared("10.10.10.10"); - EXPECT_EQ(std::string("with_destination_ip_list"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_destination_ip_list"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -199,14 +200,15 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("10.10.10.11"); connection.remote_address_ = std::make_shared("0.0.0.0"); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { // hit route with destination_ip (10.10.11.0/24) NiceMock connection; connection.local_address_ = std::make_shared("10.10.11.11"); - EXPECT_EQ(std::string("with_destination_ip_list"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_destination_ip_list"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -214,14 +216,15 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("10.10.12.12"); connection.remote_address_ = std::make_shared("0.0.0.0"); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { // hit route with destination_ip (10.11.0.0/16) NiceMock connection; connection.local_address_ = std::make_shared("10.11.11.11"); - EXPECT_EQ(std::string("with_destination_ip_list"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_destination_ip_list"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -229,14 +232,15 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("10.12.12.12"); connection.remote_address_ = std::make_shared("0.0.0.0"); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { // hit route with destination_ip (11.0.0.0/8) NiceMock connection; connection.local_address_ = std::make_shared("11.11.11.11"); - EXPECT_EQ(std::string("with_destination_ip_list"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_destination_ip_list"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -244,21 +248,23 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("12.12.12.12"); connection.remote_address_ = std::make_shared("0.0.0.0"); - EXPECT_EQ(std::string("catch_all"), 
config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { // hit route with destination_ip (128.0.0.0/8) NiceMock connection; connection.local_address_ = std::make_shared("128.255.255.255"); - EXPECT_EQ(std::string("with_destination_ip_list"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_destination_ip_list"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { // hit route with destination port range NiceMock connection; connection.local_address_ = std::make_shared("1.2.3.4", 12345); - EXPECT_EQ(std::string("with_destination_ports"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_destination_ports"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -266,7 +272,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("1.2.3.4", 23456); connection.remote_address_ = std::make_shared("0.0.0.0"); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -274,7 +280,8 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("1.2.3.4", 23456); connection.remote_address_ = std::make_shared("0.0.0.0", 23459); - EXPECT_EQ(std::string("with_source_ports"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_source_ports"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -282,7 +289,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("1.2.3.4", 23456); connection.remote_address_ = std::make_shared("0.0.0.0", 23458); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -291,7 +298,8 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { connection.local_address_ = std::make_shared("10.0.0.0", 10000); connection.remote_address_ = std::make_shared("20.0.0.0", 20000); - EXPECT_EQ(std::string("with_everything"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_everything"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -300,14 +308,15 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { connection.local_address_ = std::make_shared("10.0.0.0", 10000); connection.remote_address_ = std::make_shared("30.0.0.0", 20000); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } { // hit route with destination_ip (::1/128) NiceMock connection; connection.local_address_ = std::make_shared("::1"); - EXPECT_EQ(std::string("with_v6_destination"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_v6_destination"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -315,7 +324,8 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("2001:abcd:0:0:1::"); - EXPECT_EQ(std::string("with_v6_destination"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("with_v6_destination"), + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ 
-326,7 +336,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { connection.remote_address_ = std::make_shared("2003:0:0:0:0::5"); EXPECT_EQ(std::string("with_v6_source_and_destination"), - config_obj.getRouteFromEntries(connection)); + config_obj.getRouteFromEntries(connection)->clusterName()); } { @@ -334,10 +344,433 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { NiceMock connection; connection.local_address_ = std::make_shared("2004::"); connection.remote_address_ = std::make_shared("::"); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)); + EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); } } +// Tests that a deprecated_v1 route gets the top-level endpoint selector. +TEST(ConfigTest, DEPRECATED_FEATURE_TEST(RouteWithTopLevelMetadataMatchConfig)) { + const std::string yaml = R"EOF( + stat_prefix: name + cluster: cluster + deprecated_v1: + routes: + - cluster: catch_all + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 +)EOF"; + + NiceMock factory_context_; + Config config_obj(constructConfigFromYaml(yaml, factory_context_)); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + NiceMock connection; + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("catch_all", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k1", criterions[0]->name()); + EXPECT_EQ(hv1, criterions[0]->value()); + + EXPECT_EQ("k2", criterions[1]->name()); + EXPECT_EQ(hv2, criterions[1]->value()); +} + +// Tests that it's not possible to define a weighted cluster with 0 weight. +TEST(ConfigTest, WeightedClusterWithZeroWeightConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + weighted_clusters: + clusters: + - name: cluster1 + weight: 1 + - name: cluster2 +)EOF"; + + NiceMock factory_context; + EXPECT_THROW(constructConfigFromV2Yaml(yaml, factory_context), EnvoyException); +} + +// Tests that it is possible to define a list of weighted clusters. +TEST(ConfigTest, WeightedClustersConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + weighted_clusters: + clusters: + - name: cluster1 + weight: 1 + - name: cluster2 + weight: 2 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + NiceMock connection; + EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); + EXPECT_EQ(std::string("cluster1"), config_obj.getRouteFromEntries(connection)->clusterName()); + + EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(2)); + EXPECT_EQ(std::string("cluster2"), config_obj.getRouteFromEntries(connection)->clusterName()); +} + +// Tests that it is possible to define a list of weighted clusters with independent endpoint +// selectors. 
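+// Note: with a total weight of 3 (1 + 2), a random value of 0 is expected to select cluster1 and
+// a value of 2 to select cluster2 (assuming WeightedClusterUtil::pickCluster chooses by
+// random % total weight).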
+TEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + weighted_clusters: + clusters: + - name: cluster1 + weight: 1 + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 + - name: cluster2 + weight: 2 + metadata_match: + filter_metadata: + envoy.lb: + k3: v3 + k4: v4 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + { + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + NiceMock connection; + EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); + + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("cluster1", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k1", criterions[0]->name()); + EXPECT_EQ(hv1, criterions[0]->value()); + + EXPECT_EQ("k2", criterions[1]->name()); + EXPECT_EQ(hv2, criterions[1]->value()); + } + + { + ProtobufWkt::Value v3, v4; + v3.set_string_value("v3"); + v4.set_string_value("v4"); + HashedValue hv3(v3), hv4(v4); + + NiceMock connection; + EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(2)); + + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("cluster2", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k3", criterions[0]->name()); + EXPECT_EQ(hv3, criterions[0]->value()); + + EXPECT_EQ("k4", criterions[1]->name()); + EXPECT_EQ(hv4, criterions[1]->value()); + } +} + +// Tests that an individual endpoint selector of a weighted cluster gets merged with the top-level +// endpoint selector. 
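+// Per-cluster values are expected to take precedence over top-level ones on shared keys, and the
+// merged criteria to come back ordered by key name, which the assertions below rely on.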
+TEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + weighted_clusters: + clusters: + - name: cluster1 + weight: 1 + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 + - name: cluster2 + weight: 2 + metadata_match: + filter_metadata: + envoy.lb: + k3: v3 + k4: v4 + metadata_match: + filter_metadata: + envoy.lb: + k0: v00 + k1: v01 + k4: v04 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + ProtobufWkt::Value v00, v01, v04; + v00.set_string_value("v00"); + v01.set_string_value("v01"); + v04.set_string_value("v04"); + HashedValue hv00(v00), hv01(v01), hv04(v04); + + { + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + NiceMock connection; + EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); + + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("cluster1", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(4, criterions.size()); + + EXPECT_EQ("k0", criterions[0]->name()); + EXPECT_EQ(hv00, criterions[0]->value()); + + EXPECT_EQ("k1", criterions[1]->name()); + EXPECT_EQ(hv1, criterions[1]->value()); + + EXPECT_EQ("k2", criterions[2]->name()); + EXPECT_EQ(hv2, criterions[2]->value()); + + EXPECT_EQ("k4", criterions[3]->name()); + EXPECT_EQ(hv04, criterions[3]->value()); + } + + { + ProtobufWkt::Value v3, v4; + v3.set_string_value("v3"); + v4.set_string_value("v4"); + HashedValue hv3(v3), hv4(v4); + + NiceMock connection; + EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(2)); + + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("cluster2", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(4, criterions.size()); + + EXPECT_EQ("k0", criterions[0]->name()); + EXPECT_EQ(hv00, criterions[0]->value()); + + EXPECT_EQ("k1", criterions[1]->name()); + EXPECT_EQ(hv01, criterions[1]->value()); + + EXPECT_EQ("k3", criterions[2]->name()); + EXPECT_EQ(hv3, criterions[2]->value()); + + EXPECT_EQ("k4", criterions[3]->name()); + EXPECT_EQ(hv4, criterions[3]->value()); + } +} + +// Tests that a weighted cluster gets the top-level endpoint selector. 
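+// When a cluster entry defines no metadata_match of its own, the WeightedClusterEntry is expected
+// to fall back to the criteria configured on the TcpProxy itself.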
+TEST(ConfigTest, WeightedClustersWithTopLevelMetadataMatchConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + weighted_clusters: + clusters: + - name: cluster1 + weight: 1 + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + NiceMock connection; + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("cluster1", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k1", criterions[0]->name()); + EXPECT_EQ(hv1, criterions[0]->value()); + + EXPECT_EQ("k2", criterions[1]->name()); + EXPECT_EQ(hv2, criterions[1]->value()); +} + +// Tests that it is possible to define the top-level endpoint selector. +TEST(ConfigTest, TopLevelMetadataMatchConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + cluster: foo + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + const auto* criteria = config_obj.metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k1", criterions[0]->name()); + EXPECT_EQ(hv1, criterions[0]->value()); + + EXPECT_EQ("k2", criterions[1]->name()); + EXPECT_EQ(hv2, criterions[1]->value()); +} + +// Tests that a regular cluster gets the top-level endpoint selector. +TEST(ConfigTest, ClusterWithTopLevelMetadataMatchConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + cluster: foo + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + NiceMock connection; + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("foo", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k1", criterions[0]->name()); + EXPECT_EQ(hv1, criterions[0]->value()); + + EXPECT_EQ("k2", criterions[1]->name()); + EXPECT_EQ(hv2, criterions[1]->value()); +} + +// Tests that a per connection cluster gets the top-level endpoint selector. 
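+// The cluster name comes from the `envoy.tcp_proxy.cluster` filter state entry, yet the resulting
+// route is still expected to carry the top-level metadata_match criteria.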
+TEST(ConfigTest, PerConnectionClusterWithTopLevelMetadataMatchConfig) { + const std::string yaml = R"EOF( + stat_prefix: name + cluster: foo + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + k2: v2 +)EOF"; + + NiceMock factory_context; + Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + NiceMock connection; + connection.stream_info_.filterState().setData( + "envoy.tcp_proxy.cluster", std::make_unique("filter_state_cluster"), + StreamInfo::FilterState::StateType::Mutable); + + const auto route = config_obj.getRouteFromEntries(connection); + EXPECT_NE(nullptr, route); + + EXPECT_EQ("filter_state_cluster", route->clusterName()); + + const auto* criteria = route->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + + const auto& criterions = criteria->metadataMatchCriteria(); + EXPECT_EQ(2, criterions.size()); + + EXPECT_EQ("k1", criterions[0]->name()); + EXPECT_EQ(hv1, criterions[0]->value()); + + EXPECT_EQ("k2", criterions[1]->name()); + EXPECT_EQ(hv2, criterions[1]->value()); +} + TEST(ConfigTest, HashWithSourceIpConfig) { const std::string yaml = R"EOF( stat_prefix: name @@ -539,7 +972,7 @@ TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(DefaultRoutes)) { configure(config); NiceMock connection; - EXPECT_EQ(std::string("fake_cluster"), config_->getRouteFromEntries(connection)); + EXPECT_EQ(std::string("fake_cluster"), config_->getRouteFromEntries(connection)->clusterName()); } // Tests that half-closes are proxied and don't themselves cause any connection to be closed. @@ -764,7 +1197,7 @@ TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(NoHost)) { EXPECT_EQ(access_log_data_, "UH"); } -TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(WithMetadataMatch)) { +TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(RouteWithMetadataMatch)) { auto v1 = ProtobufWkt::Value(); v1.set_string_value("v1"); auto v2 = ProtobufWkt::Value(); @@ -787,13 +1220,102 @@ TEST_F(TcpProxyTest, DEPRECATED_FEATURE_TEST(WithMetadataMatch)) { configure(config); filter_ = std::make_unique(config_, factory_context_.cluster_manager_, timeSystem()); + filter_->initializeReadFilterCallbacks(filter_callbacks_); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); - const auto& metadata_criteria = filter_->metadataMatchCriteria()->metadataMatchCriteria(); + const auto effective_criteria = filter_->metadataMatchCriteria(); + EXPECT_NE(nullptr, effective_criteria); - EXPECT_EQ(metadata_criteria.size(), criteria.size()); + const auto& effective_criterions = effective_criteria->metadataMatchCriteria(); + EXPECT_EQ(effective_criterions.size(), criteria.size()); for (size_t i = 0; i < criteria.size(); ++i) { - EXPECT_EQ(metadata_criteria[i]->name(), criteria[i].name()); - EXPECT_EQ(metadata_criteria[i]->value(), criteria[i].value()); + EXPECT_EQ(effective_criterions[i]->name(), criteria[i].name()); + EXPECT_EQ(effective_criterions[i]->value(), criteria[i].value()); + } +} + +// Tests that the endpoint selector of a weighted cluster gets included into the +// LoadBalancerContext. 
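+// The filter itself acts as the Upstream::LoadBalancerContext, so the criteria of the picked
+// route should be observable through the context captured from tcpConnPoolForCluster().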
+TEST_F(TcpProxyTest, WeightedClusterWithMetadataMatch) { + const std::string yaml = R"EOF( + stat_prefix: name + weighted_clusters: + clusters: + - name: cluster1 + weight: 1 + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 + - name: cluster2 + weight: 2 + metadata_match: + filter_metadata: + envoy.lb: + k2: v2 + metadata_match: + filter_metadata: + envoy.lb: + k0: v0 +)EOF"; + + config_.reset(new Config(constructConfigFromYaml(yaml, factory_context_))); + + ProtobufWkt::Value v0, v1, v2; + v0.set_string_value("v0"); + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv0(v0), hv1(v1), hv2(v2); + + filter_ = std::make_unique(config_, factory_context_.cluster_manager_, timeSystem()); + filter_->initializeReadFilterCallbacks(filter_callbacks_); + + // Expect filter to try to open a connection to cluster1. + { + Upstream::LoadBalancerContext* context; + + EXPECT_CALL(factory_context_.random_, random()).WillOnce(Return(0)); + EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster("cluster1", _, _)) + .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr))); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); + + EXPECT_NE(nullptr, context); + + const auto effective_criteria = context->metadataMatchCriteria(); + EXPECT_NE(nullptr, effective_criteria); + + const auto& effective_criterions = effective_criteria->metadataMatchCriteria(); + EXPECT_EQ(2, effective_criterions.size()); + + EXPECT_EQ("k0", effective_criterions[0]->name()); + EXPECT_EQ(hv0, effective_criterions[0]->value()); + + EXPECT_EQ("k1", effective_criterions[1]->name()); + EXPECT_EQ(hv1, effective_criterions[1]->value()); + } + + // Expect filter to try to open a connection to cluster2. + { + Upstream::LoadBalancerContext* context; + + EXPECT_CALL(factory_context_.random_, random()).WillOnce(Return(2)); + EXPECT_CALL(factory_context_.cluster_manager_, tcpConnPoolForCluster("cluster2", _, _)) + .WillOnce(DoAll(SaveArg<2>(&context), Return(nullptr))); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); + + EXPECT_NE(nullptr, context); + + const auto effective_criteria = context->metadataMatchCriteria(); + EXPECT_NE(nullptr, effective_criteria); + + const auto& effective_criterions = effective_criteria->metadataMatchCriteria(); + EXPECT_EQ(2, effective_criterions.size()); + + EXPECT_EQ("k0", effective_criterions[0]->name()); + EXPECT_EQ(hv0, effective_criterions[0]->value()); + + EXPECT_EQ("k2", effective_criterions[1]->name()); + EXPECT_EQ(hv2, effective_criterions[1]->value()); } } diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 5488cb820921..5c19347a39d7 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -372,6 +372,286 @@ TEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); } +class TcpProxyMetadataMatchIntegrationTest : public TcpProxyIntegrationTest { +public: + void initialize(); + + void expectEndpointToMatchRoute(); + void expectEndpointNotToMatchRoute(); + + envoy::api::v2::core::Metadata lbMetadata(std::map values); + + envoy::config::filter::network::tcp_proxy::v2::TcpProxy tcp_proxy_; + envoy::api::v2::core::Metadata endpoint_metadata_; +}; + +envoy::api::v2::core::Metadata +TcpProxyMetadataMatchIntegrationTest::lbMetadata(std::map values) { + + ProtobufWkt::Struct map; + auto* mutable_fields = 
map.mutable_fields(); + ProtobufWkt::Value value; + + std::map::iterator it; + for (it = values.begin(); it != values.end(); it++) { + value.set_string_value(it->second); + mutable_fields->insert({it->first, value}); + } + + envoy::api::v2::core::Metadata metadata; + (*metadata.mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB] = map; + return metadata; +} + +void TcpProxyMetadataMatchIntegrationTest::initialize() { + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + + ASSERT(static_resources->listeners_size() == 1); + static_resources->mutable_listeners(0) + ->mutable_filter_chains(0) + ->mutable_filters(0) + ->mutable_typed_config() + ->PackFrom(tcp_proxy_); + + ASSERT(static_resources->clusters_size() == 1); + auto* cluster_0 = static_resources->mutable_clusters(0); + cluster_0->Clear(); + cluster_0->set_name("cluster_0"); + cluster_0->set_type(envoy::api::v2::Cluster::STATIC); + cluster_0->set_lb_policy(envoy::api::v2::Cluster::ROUND_ROBIN); + auto* lb_subset_config = cluster_0->mutable_lb_subset_config(); + lb_subset_config->set_fallback_policy(envoy::api::v2::Cluster::LbSubsetConfig::NO_FALLBACK); + auto* subset_selector = lb_subset_config->add_subset_selectors(); + subset_selector->add_keys("role"); + subset_selector->add_keys("version"); + subset_selector->add_keys("stage"); + auto* load_assignment = cluster_0->mutable_load_assignment(); + load_assignment->set_cluster_name("cluster_0"); + auto* locality_lb_endpoints = load_assignment->add_endpoints(); + auto* lb_endpoint = locality_lb_endpoints->add_lb_endpoints(); + lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address( + Network::Test::getLoopbackAddressString(version_)); + lb_endpoint->mutable_metadata()->MergeFrom(endpoint_metadata_); + }); + + TcpProxyIntegrationTest::initialize(); +} + +// Verifies successful connection. +void TcpProxyMetadataMatchIntegrationTest::expectEndpointToMatchRoute() { + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->write("hello"); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(5)); + ASSERT_TRUE(fake_upstream_connection->write("world")); + tcp_client->waitForData("world"); + tcp_client->write("hello", true); + ASSERT_TRUE(fake_upstream_connection->waitForData(10)); + ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); + ASSERT_TRUE(fake_upstream_connection->write("", true)); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); + tcp_client->waitForDisconnect(); + + test_server_->waitForCounterGe("cluster.cluster_0.lb_subsets_selected", 1); +} + +// Verifies connection failure. +void TcpProxyMetadataMatchIntegrationTest::expectEndpointNotToMatchRoute() { + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->write("hello"); + + // TODO(yskopets): 'tcp_client->waitForDisconnect(true);' gets stuck indefinitely on Linux builds, + // e.g. 
on 'envoy-linux (bazel compile_time_options)' and 'envoy-linux (bazel release)' + // tcp_client->waitForDisconnect(true); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_none_healthy", 1); + test_server_->waitForCounterEq("cluster.cluster_0.lb_subsets_selected", 0); + + tcp_client->close(); +} + +INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxyMetadataMatchIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Test subset load balancing for a regular cluster when endpoint selector is defined at the top +// level. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldMatchSingleClusterWithTopLevelMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.set_cluster("cluster_0"); + tcp_proxy_.mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + + endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointToMatchRoute(); +} + +// Test subset load balancing for a deprecated_v1 route when endpoint selector is defined at the top +// level. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + DEPRECATED_FEATURE_TEST(EndpointShouldMatchRouteWithTopLevelMetadataMatch)) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.set_cluster("fallback"); + tcp_proxy_.mutable_deprecated_v1()->add_routes()->set_cluster("cluster_0"); + tcp_proxy_.mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + + endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointToMatchRoute(); +} + +// Test subset load balancing for a weighted cluster when endpoint selector is defined on a weighted +// cluster. +TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterWithMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); + cluster_0->set_name("cluster_0"); + cluster_0->set_weight(1); + cluster_0->mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + + endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointToMatchRoute(); +} + +// Test subset load balancing for a weighted cluster when endpoint selector is defined both on a +// weighted cluster and at the top level. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldMatchWeightedClusterWithMetadataMatchAndTopLevelMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.mutable_metadata_match()->MergeFrom(lbMetadata({{"version", "v1"}, {"stage", "dev"}})); + auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); + cluster_0->set_name("cluster_0"); + cluster_0->set_weight(1); + cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata( + {{"role", "master"}, {"stage", "prod"}})); // should override `stage` value at top-level + + endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointToMatchRoute(); +} + +// Test subset load balancing for a weighted cluster when endpoint selector is defined at the top +// level only. 
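+// The weighted cluster defines no selector of its own here; a successful round trip plus an
+// increase of cluster.cluster_0.lb_subsets_selected indicates the top-level criteria were applied.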
+TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldMatchWeightedClusterWithTopLevelMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); + cluster_0->set_name("cluster_0"); + cluster_0->set_weight(1); + + endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointToMatchRoute(); +} + +// Test subset load balancing for a regular cluster when endpoint selector is defined at the top +// level. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldNotMatchSingleClusterWithTopLevelMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.set_cluster("cluster_0"); + tcp_proxy_.mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + + endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointNotToMatchRoute(); +} + +// Test subset load balancing for a deprecated_v1 route when endpoint selector is defined at the top +// level. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + DEPRECATED_FEATURE_TEST(EndpointShouldNotMatchRouteWithTopLevelMetadataMatch)) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.set_cluster("fallback"); + tcp_proxy_.mutable_deprecated_v1()->add_routes()->set_cluster("cluster_0"); + tcp_proxy_.mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + + endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointNotToMatchRoute(); +} + +// Test subset load balancing for a weighted cluster when endpoint selector is defined on a weighted +// cluster. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldNotMatchWeightedClusterWithMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); + cluster_0->set_name("cluster_0"); + cluster_0->set_weight(1); + cluster_0->mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + + endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointNotToMatchRoute(); +} + +// Test subset load balancing for a weighted cluster when endpoint selector is defined both on a +// weighted cluster and at the top level. +TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldNotMatchWeightedClusterWithMetadataMatchAndTopLevelMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.mutable_metadata_match()->MergeFrom(lbMetadata({{"version", "v1"}, {"stage", "dev"}})); + auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); + cluster_0->set_name("cluster_0"); + cluster_0->set_weight(1); + cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata( + {{"role", "master"}, {"stage", "prod"}})); // should override `stage` value at top-level + + endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "dev"}}); + + initialize(); + + expectEndpointNotToMatchRoute(); +} + +// Test subset load balancing for a weighted cluster when endpoint selector is defined at the top +// level only. 
+TEST_P(TcpProxyMetadataMatchIntegrationTest, + EndpointShouldNotMatchWeightedClusterWithTopLevelMetadataMatch) { + tcp_proxy_.set_stat_prefix("tcp_stats"); + tcp_proxy_.mutable_metadata_match()->MergeFrom( + lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); + cluster_0->set_name("cluster_0"); + cluster_0->set_weight(1); + + endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); + + initialize(); + + expectEndpointNotToMatchRoute(); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxySslIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); From 17af0ba57a45bcd7711d4af4997f3c8365f8d69c Mon Sep 17 00:00:00 2001 From: Derek Argueta Date: Fri, 8 Nov 2019 15:45:08 -0800 Subject: [PATCH 02/14] buffer: remove old implementation (#8926) Signed-off-by: Derek Argueta --- docs/root/intro/version_history.rst | 1 + include/envoy/server/options.h | 5 - source/common/buffer/buffer_impl.cc | 763 +++++++----------- source/common/buffer/buffer_impl.h | 29 - source/server/BUILD | 1 - source/server/options_impl.cc | 6 +- source/server/options_impl.h | 2 - source/server/server.cc | 7 - test/common/buffer/BUILD | 7 - test/common/buffer/buffer_fuzz.cc | 4 +- test/common/buffer/buffer_fuzz.h | 2 +- test/common/buffer/buffer_fuzz_test.cc | 2 +- test/common/buffer/buffer_test.cc | 170 +--- test/common/buffer/new_buffer_fuzz_test.cc | 12 - test/common/buffer/owned_impl_test.cc | 73 +- test/common/buffer/utility.h | 30 - test/common/buffer/watermark_buffer_test.cc | 46 +- .../buffer/zero_copy_input_stream_test.cc | 20 +- .../quiche/platform/quic_platform_test.cc | 17 +- test/mocks/server/mocks.h | 1 - test/server/options_impl_test.cc | 10 +- 21 files changed, 390 insertions(+), 818 deletions(-) delete mode 100644 test/common/buffer/new_buffer_fuzz_test.cc diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 115b29f8c038..49174c77bf64 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -5,6 +5,7 @@ Version history ================ * access log: added FILTER_STATE :ref:`access log formatters ` and gRPC access logger. * api: remove all support for v1 +* buffer: remove old implementation * build: official released binary is now built against libc++. * logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. * redis: performance improvement for larger split commands by avoiding string copies. diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 88999c07e504..c19285f14d17 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -184,11 +184,6 @@ class Options { */ virtual bool mutexTracingEnabled() const PURE; - /** - * @return whether to use the old libevent evbuffer-based Buffer implementation. - */ - virtual bool libeventBufferEnabled() const PURE; - /** * @return whether to use the fake symbol table implementation. 
*/ diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 3f4d6a86f155..c6091cd2b26c 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -12,43 +12,26 @@ namespace Envoy { namespace Buffer { void OwnedImpl::add(const void* data, uint64_t size) { - if (old_impl_) { - evbuffer_add(buffer_.get(), data, size); - } else { - const char* src = static_cast(data); - bool new_slice_needed = slices_.empty(); - while (size != 0) { - if (new_slice_needed) { - slices_.emplace_back(OwnedSlice::create(size)); - } - uint64_t copy_size = slices_.back()->append(src, size); - src += copy_size; - size -= copy_size; - length_ += copy_size; - new_slice_needed = true; + const char* src = static_cast(data); + bool new_slice_needed = slices_.empty(); + while (size != 0) { + if (new_slice_needed) { + slices_.emplace_back(OwnedSlice::create(size)); } + uint64_t copy_size = slices_.back()->append(src, size); + src += copy_size; + size -= copy_size; + length_ += copy_size; + new_slice_needed = true; } } void OwnedImpl::addBufferFragment(BufferFragment& fragment) { - if (old_impl_) { - evbuffer_add_reference( - buffer_.get(), fragment.data(), fragment.size(), - [](const void*, size_t, void* arg) { static_cast(arg)->done(); }, - &fragment); - } else { - length_ += fragment.size(); - slices_.emplace_back(std::make_unique(fragment)); - } + length_ += fragment.size(); + slices_.emplace_back(std::make_unique(fragment)); } -void OwnedImpl::add(absl::string_view data) { - if (old_impl_) { - evbuffer_add(buffer_.get(), data.data(), data.size()); - } else { - add(data.data(), data.size()); - } -} +void OwnedImpl::add(absl::string_view data) { add(data.data(), data.size()); } void OwnedImpl::add(const Instance& data) { ASSERT(&data != this); @@ -61,305 +44,220 @@ void OwnedImpl::add(const Instance& data) { } void OwnedImpl::prepend(absl::string_view data) { - if (old_impl_) { - // Prepending an empty string seems to mess up libevent internally. - // evbuffer_prepend doesn't have a check for empty (unlike - // evbuffer_prepend_buffer which does). This then results in an allocation of - // an empty chain, which causes problems with a following move/append. This - // only seems to happen the original buffer was created via - // addBufferFragment(), this forces the code execution path in - // evbuffer_prepend related to immutable buffers. - if (data.empty()) { - return; - } - evbuffer_prepend(buffer_.get(), data.data(), data.size()); - } else { - uint64_t size = data.size(); - bool new_slice_needed = slices_.empty(); - while (size != 0) { - if (new_slice_needed) { - slices_.emplace_front(OwnedSlice::create(size)); - } - uint64_t copy_size = slices_.front()->prepend(data.data(), size); - size -= copy_size; - length_ += copy_size; - new_slice_needed = true; + uint64_t size = data.size(); + bool new_slice_needed = slices_.empty(); + while (size != 0) { + if (new_slice_needed) { + slices_.emplace_front(OwnedSlice::create(size)); } + uint64_t copy_size = slices_.front()->prepend(data.data(), size); + size -= copy_size; + length_ += copy_size; + new_slice_needed = true; } } void OwnedImpl::prepend(Instance& data) { ASSERT(&data != this); - ASSERT(isSameBufferImpl(data)); - // See the comments in move() for why we do the static_cast. 
- if (old_impl_) { - ASSERT(dynamic_cast(&data) != nullptr); - int rc = - evbuffer_prepend_buffer(buffer_.get(), static_cast(data).buffer().get()); - ASSERT(rc == 0); - ASSERT(data.length() == 0); - static_cast(data).postProcess(); - } else { - OwnedImpl& other = static_cast(data); - while (!other.slices_.empty()) { - uint64_t slice_size = other.slices_.back()->dataSize(); - length_ += slice_size; - slices_.emplace_front(std::move(other.slices_.back())); - other.slices_.pop_back(); - other.length_ -= slice_size; - } - other.postProcess(); + OwnedImpl& other = static_cast(data); + while (!other.slices_.empty()) { + uint64_t slice_size = other.slices_.back()->dataSize(); + length_ += slice_size; + slices_.emplace_front(std::move(other.slices_.back())); + other.slices_.pop_back(); + other.length_ -= slice_size; } + other.postProcess(); } void OwnedImpl::commit(RawSlice* iovecs, uint64_t num_iovecs) { - if (old_impl_) { - int rc = - evbuffer_commit_space(buffer_.get(), reinterpret_cast(iovecs), num_iovecs); - ASSERT(rc == 0); - } else { - if (num_iovecs == 0) { + if (num_iovecs == 0) { + return; + } + // Find the slices in the buffer that correspond to the iovecs: + // First, scan backward from the end of the buffer to find the last slice containing + // any content. Reservations are made from the end of the buffer, and out-of-order commits + // aren't supported, so any slices before this point cannot match the iovecs being committed. + ssize_t slice_index = static_cast(slices_.size()) - 1; + while (slice_index >= 0 && slices_[slice_index]->dataSize() == 0) { + slice_index--; + } + if (slice_index < 0) { + // There was no slice containing any data, so rewind the iterator at the first slice. + slice_index = 0; + if (!slices_[0]) { return; } - // Find the slices in the buffer that correspond to the iovecs: - // First, scan backward from the end of the buffer to find the last slice containing - // any content. Reservations are made from the end of the buffer, and out-of-order commits - // aren't supported, so any slices before this point cannot match the iovecs being committed. - ssize_t slice_index = static_cast(slices_.size()) - 1; - while (slice_index >= 0 && slices_[slice_index]->dataSize() == 0) { - slice_index--; - } - if (slice_index < 0) { - // There was no slice containing any data, so rewind the iterator at the first slice. - slice_index = 0; - if (!slices_[0]) { - return; - } - } + } - // Next, scan forward and attempt to match the slices against iovecs. - uint64_t num_slices_committed = 0; - while (num_slices_committed < num_iovecs) { - if (slices_[slice_index]->commit(iovecs[num_slices_committed])) { - length_ += iovecs[num_slices_committed].len_; - num_slices_committed++; - } - slice_index++; - if (slice_index == static_cast(slices_.size())) { - break; - } + // Next, scan forward and attempt to match the slices against iovecs. 
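+ // Each iovec must be committed into the slice it was reserved from; a slice whose commit()
+ // does not accept the current iovec is skipped without adding anything to length_.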
+ uint64_t num_slices_committed = 0; + while (num_slices_committed < num_iovecs) { + if (slices_[slice_index]->commit(iovecs[num_slices_committed])) { + length_ += iovecs[num_slices_committed].len_; + num_slices_committed++; + } + slice_index++; + if (slice_index == static_cast(slices_.size())) { + break; } - - ASSERT(num_slices_committed > 0); } + + ASSERT(num_slices_committed > 0); } void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const { - if (old_impl_) { - ASSERT(start + size <= length()); - - evbuffer_ptr start_ptr; - int rc = evbuffer_ptr_set(buffer_.get(), &start_ptr, start, EVBUFFER_PTR_SET); - ASSERT(rc != -1); - - ev_ssize_t copied = evbuffer_copyout_from(buffer_.get(), &start_ptr, data, size); - ASSERT(static_cast(copied) == size); - } else { - uint64_t bytes_to_skip = start; - uint8_t* dest = static_cast(data); - for (const auto& slice : slices_) { - if (size == 0) { - break; - } - uint64_t data_size = slice->dataSize(); - if (data_size <= bytes_to_skip) { - // The offset where the caller wants to start copying is after the end of this slice, - // so just skip over this slice completely. - bytes_to_skip -= data_size; - continue; - } - uint64_t copy_size = std::min(size, data_size - bytes_to_skip); - memcpy(dest, slice->data() + bytes_to_skip, copy_size); - size -= copy_size; - dest += copy_size; - // Now that we've started copying, there are no bytes left to skip over. If there - // is any more data to be copied, the next iteration can start copying from the very - // beginning of the next slice. - bytes_to_skip = 0; + uint64_t bytes_to_skip = start; + uint8_t* dest = static_cast(data); + for (const auto& slice : slices_) { + if (size == 0) { + break; } - ASSERT(size == 0); + uint64_t data_size = slice->dataSize(); + if (data_size <= bytes_to_skip) { + // The offset where the caller wants to start copying is after the end of this slice, + // so just skip over this slice completely. + bytes_to_skip -= data_size; + continue; + } + uint64_t copy_size = std::min(size, data_size - bytes_to_skip); + memcpy(dest, slice->data() + bytes_to_skip, copy_size); + size -= copy_size; + dest += copy_size; + // Now that we've started copying, there are no bytes left to skip over. If there + // is any more data to be copied, the next iteration can start copying from the very + // beginning of the next slice. 
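+ // Each per-slice copy is bounded by both the caller's remaining request (size) and the data
+ // available in the slice, so at most the originally requested number of bytes is written.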
+ bytes_to_skip = 0; } + ASSERT(size == 0); } void OwnedImpl::drain(uint64_t size) { - if (old_impl_) { - ASSERT(size <= length()); - int rc = evbuffer_drain(buffer_.get(), size); - ASSERT(rc == 0); - } else { - while (size != 0) { - if (slices_.empty()) { - break; - } - uint64_t slice_size = slices_.front()->dataSize(); - if (slice_size <= size) { - slices_.pop_front(); - length_ -= slice_size; - size -= slice_size; - } else { - slices_.front()->drain(size); - length_ -= size; - size = 0; - } + while (size != 0) { + if (slices_.empty()) { + break; + } + uint64_t slice_size = slices_.front()->dataSize(); + if (slice_size <= size) { + slices_.pop_front(); + length_ -= slice_size; + size -= slice_size; + } else { + slices_.front()->drain(size); + length_ -= size; + size = 0; } } } uint64_t OwnedImpl::getRawSlices(RawSlice* out, uint64_t out_size) const { - if (old_impl_) { - return evbuffer_peek(buffer_.get(), -1, nullptr, reinterpret_cast(out), - out_size); - } else { - uint64_t num_slices = 0; - for (const auto& slice : slices_) { - if (slice->dataSize() == 0) { - continue; - } - if (num_slices < out_size) { - out[num_slices].mem_ = slice->data(); - out[num_slices].len_ = slice->dataSize(); - } - // Per the definition of getRawSlices in include/envoy/buffer/buffer.h, we need to return - // the total number of slices needed to access all the data in the buffer, which can be - // larger than out_size. So we keep iterating and counting non-empty slices here, even - // if all the caller-supplied slices have been filled. - num_slices++; + uint64_t num_slices = 0; + for (const auto& slice : slices_) { + if (slice->dataSize() == 0) { + continue; + } + if (num_slices < out_size) { + out[num_slices].mem_ = slice->data(); + out[num_slices].len_ = slice->dataSize(); } - return num_slices; + // Per the definition of getRawSlices in include/envoy/buffer/buffer.h, we need to return + // the total number of slices needed to access all the data in the buffer, which can be + // larger than out_size. So we keep iterating and counting non-empty slices here, even + // if all the caller-supplied slices have been filled. + num_slices++; } + return num_slices; } uint64_t OwnedImpl::length() const { - if (old_impl_) { - return evbuffer_get_length(buffer_.get()); - } else { #ifndef NDEBUG - // When running in debug mode, verify that the precomputed length matches the sum - // of the lengths of the slices. - uint64_t length = 0; - for (const auto& slice : slices_) { - length += slice->dataSize(); - } - ASSERT(length == length_); + // When running in debug mode, verify that the precomputed length matches the sum + // of the lengths of the slices. 
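+ // This re-walk of the slices is O(n) in the number of slices and is compiled out of release
+ // builds, where length() just returns the cached value below.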
+ uint64_t length = 0; + for (const auto& slice : slices_) { + length += slice->dataSize(); + } + ASSERT(length == length_); #endif - return length_; - } + return length_; } void* OwnedImpl::linearize(uint32_t size) { RELEASE_ASSERT(size <= length(), "Linearize size exceeds buffer size"); - if (old_impl_) { - void* const ret = evbuffer_pullup(buffer_.get(), size); - RELEASE_ASSERT(ret != nullptr || size == 0, - "Failure to linearize may result in buffer overflow by the caller."); - return ret; - } else { - if (slices_.empty()) { - return nullptr; - } - uint64_t linearized_size = 0; - uint64_t num_slices_to_linearize = 0; - for (const auto& slice : slices_) { - num_slices_to_linearize++; - linearized_size += slice->dataSize(); - if (linearized_size >= size) { - break; - } - } - if (num_slices_to_linearize > 1) { - auto new_slice = OwnedSlice::create(linearized_size); - uint64_t bytes_copied = 0; - Slice::Reservation reservation = new_slice->reserve(linearized_size); - ASSERT(reservation.mem_ != nullptr); - ASSERT(reservation.len_ == linearized_size); - auto dest = static_cast(reservation.mem_); - do { - uint64_t data_size = slices_.front()->dataSize(); - memcpy(dest, slices_.front()->data(), data_size); - bytes_copied += data_size; - dest += data_size; - slices_.pop_front(); - } while (bytes_copied < linearized_size); - ASSERT(dest == static_cast(reservation.mem_) + linearized_size); - new_slice->commit(reservation); - slices_.emplace_front(std::move(new_slice)); + if (slices_.empty()) { + return nullptr; + } + uint64_t linearized_size = 0; + uint64_t num_slices_to_linearize = 0; + for (const auto& slice : slices_) { + num_slices_to_linearize++; + linearized_size += slice->dataSize(); + if (linearized_size >= size) { + break; } - return slices_.front()->data(); } + if (num_slices_to_linearize > 1) { + auto new_slice = OwnedSlice::create(linearized_size); + uint64_t bytes_copied = 0; + Slice::Reservation reservation = new_slice->reserve(linearized_size); + ASSERT(reservation.mem_ != nullptr); + ASSERT(reservation.len_ == linearized_size); + auto dest = static_cast(reservation.mem_); + do { + uint64_t data_size = slices_.front()->dataSize(); + memcpy(dest, slices_.front()->data(), data_size); + bytes_copied += data_size; + dest += data_size; + slices_.pop_front(); + } while (bytes_copied < linearized_size); + ASSERT(dest == static_cast(reservation.mem_) + linearized_size); + new_slice->commit(reservation); + slices_.emplace_front(std::move(new_slice)); + } + return slices_.front()->data(); } void OwnedImpl::move(Instance& rhs) { ASSERT(&rhs != this); - ASSERT(isSameBufferImpl(rhs)); - if (old_impl_) { - // We do the static cast here because in practice we only have one buffer implementation right - // now and this is safe. Using the evbuffer move routines require having access to both - // evbuffers. This is a reasonable compromise in a high performance path where we want to - // maintain an abstraction in case we get rid of evbuffer later. - ASSERT(dynamic_cast(&rhs) != nullptr); - int rc = evbuffer_add_buffer(buffer_.get(), static_cast(rhs).buffer().get()); - ASSERT(rc == 0); - static_cast(rhs).postProcess(); - } else { - // We do the static cast here because in practice we only have one buffer implementation right - // now and this is safe. This is a reasonable compromise in a high performance path where we - // want to maintain an abstraction. 
- OwnedImpl& other = static_cast(rhs); - while (!other.slices_.empty()) { - const uint64_t slice_size = other.slices_.front()->dataSize(); - slices_.emplace_back(std::move(other.slices_.front())); - other.slices_.pop_front(); - length_ += slice_size; - other.length_ -= slice_size; - } - other.postProcess(); + // We do the static cast here because in practice we only have one buffer implementation right + // now and this is safe. This is a reasonable compromise in a high performance path where we + // want to maintain an abstraction. + OwnedImpl& other = static_cast(rhs); + while (!other.slices_.empty()) { + const uint64_t slice_size = other.slices_.front()->dataSize(); + slices_.emplace_back(std::move(other.slices_.front())); + other.slices_.pop_front(); + length_ += slice_size; + other.length_ -= slice_size; } + other.postProcess(); } void OwnedImpl::move(Instance& rhs, uint64_t length) { ASSERT(&rhs != this); - ASSERT(isSameBufferImpl(rhs)); - if (old_impl_) { - // See move() above for why we do the static cast. - int rc = evbuffer_remove_buffer(static_cast(rhs).buffer().get(), - buffer_.get(), length); - ASSERT(static_cast(rc) == length); - static_cast(rhs).postProcess(); - } else { - // See move() above for why we do the static cast. - OwnedImpl& other = static_cast(rhs); - while (length != 0 && !other.slices_.empty()) { - const uint64_t slice_size = other.slices_.front()->dataSize(); - const uint64_t copy_size = std::min(slice_size, length); - if (copy_size == 0) { - other.slices_.pop_front(); - } else if (copy_size < slice_size) { - // TODO(brian-pane) add reference-counting to allow slices to share their storage - // and eliminate the copy for this partial-slice case? - add(other.slices_.front()->data(), copy_size); - other.slices_.front()->drain(copy_size); - other.length_ -= copy_size; - } else { - slices_.emplace_back(std::move(other.slices_.front())); - other.slices_.pop_front(); - length_ += slice_size; - other.length_ -= slice_size; - } - length -= copy_size; + // See move() above for why we do the static cast. + OwnedImpl& other = static_cast(rhs); + while (length != 0 && !other.slices_.empty()) { + const uint64_t slice_size = other.slices_.front()->dataSize(); + const uint64_t copy_size = std::min(slice_size, length); + if (copy_size == 0) { + other.slices_.pop_front(); + } else if (copy_size < slice_size) { + // TODO(brian-pane) add reference-counting to allow slices to share their storage + // and eliminate the copy for this partial-slice case? 
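+ // Until then, the partial slice is copied byte-for-byte into this buffer and the copied
+ // prefix is drained from the source, so the two buffers never share slice storage.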
+ add(other.slices_.front()->data(), copy_size); + other.slices_.front()->drain(copy_size); + other.length_ -= copy_size; + } else { + slices_.emplace_back(std::move(other.slices_.front())); + other.slices_.pop_front(); + length_ += slice_size; + other.length_ -= slice_size; } - other.postProcess(); + length -= copy_size; } + other.postProcess(); } Api::IoCallUint64Result OwnedImpl::read(Network::IoHandle& io_handle, uint64_t max_length) { @@ -370,31 +268,13 @@ Api::IoCallUint64Result OwnedImpl::read(Network::IoHandle& io_handle, uint64_t m RawSlice slices[MaxSlices]; const uint64_t num_slices = reserve(max_length, slices, MaxSlices); Api::IoCallUint64Result result = io_handle.readv(max_length, slices, num_slices); - if (old_impl_) { - if (!result.ok()) { - return result; - } - uint64_t num_slices_to_commit = 0; - uint64_t bytes_to_commit = result.rc_; - ASSERT(bytes_to_commit <= max_length); - while (bytes_to_commit != 0) { - slices[num_slices_to_commit].len_ = - std::min(slices[num_slices_to_commit].len_, static_cast(bytes_to_commit)); - ASSERT(bytes_to_commit >= slices[num_slices_to_commit].len_); - bytes_to_commit -= slices[num_slices_to_commit].len_; - num_slices_to_commit++; - } - ASSERT(num_slices_to_commit <= num_slices); - commit(slices, num_slices_to_commit); - } else { - uint64_t bytes_to_commit = result.ok() ? result.rc_ : 0; - ASSERT(bytes_to_commit <= max_length); - for (uint64_t i = 0; i < num_slices; i++) { - slices[i].len_ = std::min(slices[i].len_, static_cast(bytes_to_commit)); - bytes_to_commit -= slices[i].len_; - } - commit(slices, num_slices); + uint64_t bytes_to_commit = result.ok() ? result.rc_ : 0; + ASSERT(bytes_to_commit <= max_length); + for (uint64_t i = 0; i < num_slices; i++) { + slices[i].len_ = std::min(slices[i].len_, static_cast(bytes_to_commit)); + bytes_to_commit -= slices[i].len_; } + commit(slices, num_slices); return result; } @@ -402,137 +282,118 @@ uint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iove if (num_iovecs == 0 || length == 0) { return 0; } - if (old_impl_) { - int ret = evbuffer_reserve_space(buffer_.get(), length, - reinterpret_cast(iovecs), num_iovecs); - RELEASE_ASSERT(ret >= 1, "Failure to allocate may result in callers writing to uninitialized " - "memory, buffer overflows, etc"); - return static_cast(ret); - } else { - // Check whether there are any empty slices with reservable space at the end of the buffer. - size_t first_reservable_slice = slices_.size(); - while (first_reservable_slice > 0) { - if (slices_[first_reservable_slice - 1]->reservableSize() == 0) { - break; - } - first_reservable_slice--; - if (slices_[first_reservable_slice]->dataSize() != 0) { - // There is some content in this slice, so anything in front of it is non-reservable. - break; - } + // Check whether there are any empty slices with reservable space at the end of the buffer. + size_t first_reservable_slice = slices_.size(); + while (first_reservable_slice > 0) { + if (slices_[first_reservable_slice - 1]->reservableSize() == 0) { + break; } - - // Having found the sequence of reservable slices at the back of the buffer, reserve - // as much space as possible from each one. 
- uint64_t num_slices_used = 0; - uint64_t bytes_remaining = length; - size_t slice_index = first_reservable_slice; - while (slice_index < slices_.size() && bytes_remaining != 0 && num_slices_used < num_iovecs) { - auto& slice = slices_[slice_index]; - const uint64_t reservation_size = std::min(slice->reservableSize(), bytes_remaining); - if (num_slices_used + 1 == num_iovecs && reservation_size < bytes_remaining) { - // There is only one iovec left, and this next slice does not have enough space to - // complete the reservation. Stop iterating, with last one iovec still unpopulated, - // so the code following this loop can allocate a new slice to hold the rest of the - // reservation. - break; - } - iovecs[num_slices_used] = slice->reserve(reservation_size); - bytes_remaining -= iovecs[num_slices_used].len_; - num_slices_used++; - slice_index++; + first_reservable_slice--; + if (slices_[first_reservable_slice]->dataSize() != 0) { + // There is some content in this slice, so anything in front of it is non-reservable. + break; } + } - // If needed, allocate one more slice at the end to provide the remainder of the reservation. - if (bytes_remaining != 0) { - slices_.emplace_back(OwnedSlice::create(bytes_remaining)); - iovecs[num_slices_used] = slices_.back()->reserve(bytes_remaining); - bytes_remaining -= iovecs[num_slices_used].len_; - num_slices_used++; + // Having found the sequence of reservable slices at the back of the buffer, reserve + // as much space as possible from each one. + uint64_t num_slices_used = 0; + uint64_t bytes_remaining = length; + size_t slice_index = first_reservable_slice; + while (slice_index < slices_.size() && bytes_remaining != 0 && num_slices_used < num_iovecs) { + auto& slice = slices_[slice_index]; + const uint64_t reservation_size = std::min(slice->reservableSize(), bytes_remaining); + if (num_slices_used + 1 == num_iovecs && reservation_size < bytes_remaining) { + // There is only one iovec left, and this next slice does not have enough space to + // complete the reservation. Stop iterating, with last one iovec still unpopulated, + // so the code following this loop can allocate a new slice to hold the rest of the + // reservation. + break; } + iovecs[num_slices_used] = slice->reserve(reservation_size); + bytes_remaining -= iovecs[num_slices_used].len_; + num_slices_used++; + slice_index++; + } - ASSERT(num_slices_used <= num_iovecs); - ASSERT(bytes_remaining == 0); - return num_slices_used; + // If needed, allocate one more slice at the end to provide the remainder of the reservation. + if (bytes_remaining != 0) { + slices_.emplace_back(OwnedSlice::create(bytes_remaining)); + iovecs[num_slices_used] = slices_.back()->reserve(bytes_remaining); + bytes_remaining -= iovecs[num_slices_used].len_; + num_slices_used++; } + + ASSERT(num_slices_used <= num_iovecs); + ASSERT(bytes_remaining == 0); + return num_slices_used; } ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { - if (old_impl_) { - evbuffer_ptr start_ptr; - if (-1 == evbuffer_ptr_set(buffer_.get(), &start_ptr, start, EVBUFFER_PTR_SET)) { - return -1; - } - - evbuffer_ptr result_ptr = - evbuffer_search(buffer_.get(), static_cast(data), size, &start_ptr); - return result_ptr.pos; - } else { - // This implementation uses the same search algorithm as evbuffer_search(), a naive - // scan that requires O(M*N) comparisons in the worst case. - // TODO(brian-pane): replace this with a more efficient search if it shows up - // prominently in CPU profiling. 
- if (size == 0) { - return (start <= length_) ? start : -1; + // This implementation uses the same search algorithm as evbuffer_search(), a naive + // scan that requires O(M*N) comparisons in the worst case. + // TODO(brian-pane): replace this with a more efficient search if it shows up + // prominently in CPU profiling. + if (size == 0) { + return (start <= length_) ? start : -1; + } + ssize_t offset = 0; + const uint8_t* needle = static_cast(data); + for (size_t slice_index = 0; slice_index < slices_.size(); slice_index++) { + const auto& slice = slices_[slice_index]; + uint64_t slice_size = slice->dataSize(); + if (slice_size <= start) { + start -= slice_size; + offset += slice_size; + continue; } - ssize_t offset = 0; - const uint8_t* needle = static_cast(data); - for (size_t slice_index = 0; slice_index < slices_.size(); slice_index++) { - const auto& slice = slices_[slice_index]; - uint64_t slice_size = slice->dataSize(); - if (slice_size <= start) { - start -= slice_size; - offset += slice_size; - continue; + const uint8_t* slice_start = slice->data(); + const uint8_t* haystack = slice_start; + const uint8_t* haystack_end = haystack + slice_size; + haystack += start; + while (haystack < haystack_end) { + // Search within this slice for the first byte of the needle. + const uint8_t* first_byte_match = + static_cast(memchr(haystack, needle[0], haystack_end - haystack)); + if (first_byte_match == nullptr) { + break; } - const uint8_t* slice_start = slice->data(); - const uint8_t* haystack = slice_start; - const uint8_t* haystack_end = haystack + slice_size; - haystack += start; - while (haystack < haystack_end) { - // Search within this slice for the first byte of the needle. - const uint8_t* first_byte_match = - static_cast(memchr(haystack, needle[0], haystack_end - haystack)); - if (first_byte_match == nullptr) { - break; - } - // After finding a match for the first byte of the needle, check whether the following - // bytes in the buffer match the remainder of the needle. Note that the match can span - // two or more slices. - size_t i = 1; - size_t match_index = slice_index; - const uint8_t* match_next = first_byte_match + 1; - const uint8_t* match_end = haystack_end; - while (i < size) { - if (match_next >= match_end) { - // We've hit the end of this slice, so continue checking against the next slice. - match_index++; - if (match_index == slices_.size()) { - // We've hit the end of the entire buffer. - break; - } - const auto& match_slice = slices_[match_index]; - match_next = match_slice->data(); - match_end = match_next + match_slice->dataSize(); - continue; - } - if (*match_next++ != needle[i]) { + // After finding a match for the first byte of the needle, check whether the following + // bytes in the buffer match the remainder of the needle. Note that the match can span + // two or more slices. + size_t i = 1; + size_t match_index = slice_index; + const uint8_t* match_next = first_byte_match + 1; + const uint8_t* match_end = haystack_end; + while (i < size) { + if (match_next >= match_end) { + // We've hit the end of this slice, so continue checking against the next slice. + match_index++; + if (match_index == slices_.size()) { + // We've hit the end of the entire buffer. break; } - i++; + const auto& match_slice = slices_[match_index]; + match_next = match_slice->data(); + match_end = match_next + match_slice->dataSize(); + continue; } - if (i == size) { - // Successful match of the entire needle. 
- return offset + (first_byte_match - slice_start); + if (*match_next++ != needle[i]) { + break; } - // If this wasn't a successful match, start scanning again at the next byte. - haystack = first_byte_match + 1; + i++; } - start = 0; - offset += slice_size; + if (i == size) { + // Successful match of the entire needle. + return offset + (first_byte_match - slice_start); + } + // If this wasn't a successful match, start scanning again at the next byte. + haystack = first_byte_match + 1; } - return -1; + start = 0; + offset += slice_size; } + return -1; } bool OwnedImpl::startsWith(absl::string_view data) const { @@ -545,44 +406,29 @@ bool OwnedImpl::startsWith(absl::string_view data) const { return true; } - if (old_impl_) { - evbuffer_ptr start_ptr, end_ptr; - if (-1 == evbuffer_ptr_set(buffer_.get(), &start_ptr, 0, EVBUFFER_PTR_SET)) { - return false; - } + const uint8_t* prefix = reinterpret_cast(data.data()); + size_t size = data.length(); + for (const auto& slice : slices_) { + uint64_t slice_size = slice->dataSize(); + const uint8_t* slice_start = slice->data(); - if (-1 == evbuffer_ptr_set(buffer_.get(), &end_ptr, data.length(), EVBUFFER_PTR_SET)) { - return false; + if (slice_size >= size) { + // The remaining size bytes of data are in this slice. + return memcmp(prefix, slice_start, size) == 0; } - evbuffer_ptr result_ptr = - evbuffer_search_range(buffer_.get(), data.data(), data.length(), &start_ptr, &end_ptr); - return result_ptr.pos == 0; - } else { - const uint8_t* prefix = reinterpret_cast(data.data()); - size_t size = data.length(); - for (const auto& slice : slices_) { - uint64_t slice_size = slice->dataSize(); - const uint8_t* slice_start = slice->data(); - - if (slice_size >= size) { - // The remaining size bytes of data are in this slice. - return memcmp(prefix, slice_start, size) == 0; - } - - // Slice is smaller than data, see if the prefix matches. - if (memcmp(prefix, slice_start, slice_size) != 0) { - return false; - } - - // Prefix matched. Continue looking at the next slice. - prefix += slice_size; - size -= slice_size; + // Slice is smaller than data, see if the prefix matches. + if (memcmp(prefix, slice_start, slice_size) != 0) { + return false; } - // Less data in slices than length() reported. - NOT_REACHED_GCOVR_EXCL_LINE; + // Prefix matched. Continue looking at the next slice. + prefix += slice_size; + size -= slice_size; } + + // Less data in slices than length() reported. 
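+ // Reaching this point would mean the slices hold less data than the cached length_ reports,
+ // an inconsistency that the debug-only verification in length() is meant to catch.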
+ NOT_REACHED_GCOVR_EXCL_LINE; } Api::IoCallUint64Result OwnedImpl::write(Network::IoHandle& io_handle) { @@ -596,11 +442,7 @@ Api::IoCallUint64Result OwnedImpl::write(Network::IoHandle& io_handle) { return result; } -OwnedImpl::OwnedImpl() : old_impl_(use_old_impl_) { - if (old_impl_) { - buffer_ = evbuffer_new(); - } -} +OwnedImpl::OwnedImpl() = default; OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); } @@ -628,30 +470,13 @@ std::string OwnedImpl::toString() const { void OwnedImpl::postProcess() {} void OwnedImpl::appendSliceForTest(const void* data, uint64_t size) { - if (old_impl_) { - OwnedImpl rhs(data, size); - move(rhs); - } else { - slices_.emplace_back(OwnedSlice::create(data, size)); - length_ += size; - } + slices_.emplace_back(OwnedSlice::create(data, size)); + length_ += size; } void OwnedImpl::appendSliceForTest(absl::string_view data) { appendSliceForTest(data.data(), data.size()); } -void OwnedImpl::useOldImpl(bool use_old_impl) { use_old_impl_ = use_old_impl; } - -bool OwnedImpl::isSameBufferImpl(const Instance& rhs) const { - const OwnedImpl* other = dynamic_cast(&rhs); - if (other == nullptr) { - return false; - } - return usesOldImpl() == other->usesOldImpl(); -} - -bool OwnedImpl::use_old_impl_ = false; - } // namespace Buffer } // namespace Envoy diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 114d14dabc94..bfba52898468 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -442,8 +442,6 @@ class BufferFragmentImpl : NonCopyable, public BufferFragment { class LibEventInstance : public Instance { public: - // Allows access into the underlying buffer for move() optimizations. - virtual Event::Libevent::BufferPtr& buffer() PURE; // Called after accessing the memory in buffer() directly to allow any post-processing. virtual void postProcess() PURE; }; @@ -508,7 +506,6 @@ class OwnedImpl : public LibEventInstance { std::string toString() const override; // LibEventInstance - Event::Libevent::BufferPtr& buffer() override { return buffer_; } void postProcess() override; /** @@ -524,23 +521,6 @@ class OwnedImpl : public LibEventInstance { */ void appendSliceForTest(absl::string_view data); - // Support for choosing the buffer implementation at runtime. - // TODO(brian-pane) remove this once the new implementation has been - // running in production for a while. - - /** @return whether this buffer uses the old evbuffer-based implementation. */ - bool usesOldImpl() const { return old_impl_; } - - /** - * @param use_old_impl whether to use the evbuffer-based implementation for new buffers - * @warning Except for testing code, this method should be called at most once per process, - * before any OwnedImpl objects are created. The reason is that it is unsafe to - * mix and match buffers with different implementations. The move() method, - * in particular, only works if the source and destination objects are using - * the same destination. - */ - static void useOldImpl(bool use_old_impl); - private: /** * @param rhs another buffer @@ -549,20 +529,11 @@ class OwnedImpl : public LibEventInstance { */ bool isSameBufferImpl(const Instance& rhs) const; - /** Whether to use the old evbuffer implementation when constructing new OwnedImpl objects. */ - static bool use_old_impl_; - - /** Whether this buffer uses the old evbuffer implementation. */ - bool old_impl_; - /** Ring buffer of slices. */ SliceDeque slices_; /** Sum of the dataSize of all slices. 
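Kept in sync incrementally by the mutating methods (add, prepend, commit, drain, move) so that length() stays O(1); debug builds re-verify it against the slices.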
*/ OverflowDetectingUInt64 length_; - - /** Used when old_impl_==true */ - Event::Libevent::BufferPtr buffer_; }; using BufferFragmentPtr = std::unique_ptr; diff --git a/source/server/BUILD b/source/server/BUILD index ee4de6e782df..53495174469b 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -398,7 +398,6 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//source/common/access_log:access_log_manager_lib", "//source/common/api:api_lib", - "//source/common/buffer:buffer_lib", "//source/common/common:cleanup_lib", "//source/common/common:logger_lib", "//source/common/common:mutex_tracer_lib", diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 15aa9469b18a..59aceafddd61 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -113,9 +113,6 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, TCLAP::SwitchArg cpuset_threads( "", "cpuset-threads", "Get the default # of worker threads from cpuset size", cmd, false); - TCLAP::ValueArg use_libevent_buffer("", "use-libevent-buffers", - "Use the original libevent buffer implementation", - false, false, "bool", cmd); TCLAP::ValueArg use_fake_symbol_table("", "use-fake-symbol-table", "Use fake symbol table implementation", false, true, "bool", cmd); @@ -141,7 +138,6 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, mutex_tracing_enabled_ = enable_mutex_tracing.getValue(); - libevent_buffer_enabled_ = use_libevent_buffer.getValue(); fake_symbol_table_enabled_ = use_fake_symbol_table.getValue(); cpuset_threads_ = cpuset_threads.getValue(); @@ -310,6 +306,6 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), parent_shutdown_time_(900), mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true), mutex_tracing_enabled_(false), cpuset_threads_(false), - libevent_buffer_enabled_(false), fake_symbol_table_enabled_(false) {} + fake_symbol_table_enabled_(false) {} } // namespace Envoy diff --git a/source/server/options_impl.h b/source/server/options_impl.h index 91dc3d3522ae..2d635fd91b9c 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -120,7 +120,6 @@ class OptionsImpl : public Server::Options, protected Logger::LoggableregisteredNames(), ", ")); } - // Enable the selected buffer implementation (old libevent evbuffer version or new native - // version) early in the initialization, before any buffers can be created. - Buffer::OwnedImpl::useOldImpl(options.libeventBufferEnabled()); - ENVOY_LOG(info, "buffer implementation: {}", - Buffer::OwnedImpl().usesOldImpl() ? "old (libevent)" : "new"); - // Handle configuration that needs to take place prior to the main configuration load. 
InstanceUtil::loadBootstrapConfig(bootstrap_, options, messageValidationContext().staticValidationVisitor(), *api_); diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index 278f01ab85f1..a894dcd94e9c 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -58,13 +58,6 @@ envoy_cc_test( ], ) -envoy_cc_fuzz_test( - name = "new_buffer_fuzz_test", - srcs = ["new_buffer_fuzz_test.cc"], - corpus = "buffer_corpus", - deps = [":buffer_fuzz_lib"], -) - envoy_cc_test( name = "owned_impl_test", srcs = ["owned_impl_test.cc"], diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 1b20fc60d4f7..3535b2c67aec 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -477,9 +477,7 @@ void executeActions(const test::common::buffer::BufferFuzzTestCase& input, Buffe } } -void BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input, bool old_impl) { - ENVOY_LOG_MISC(trace, "Using {} buffer implementation", old_impl ? "old" : "new"); - Buffer::OwnedImpl::useOldImpl(old_impl); +void BufferFuzz::bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input) { Context ctxt; // Fuzzed buffers. BufferList buffers; diff --git a/test/common/buffer/buffer_fuzz.h b/test/common/buffer/buffer_fuzz.h index 108ac4cab45d..71d542a972c9 100644 --- a/test/common/buffer/buffer_fuzz.h +++ b/test/common/buffer/buffer_fuzz.h @@ -6,7 +6,7 @@ namespace Envoy { class BufferFuzz { public: - static void bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input, bool old_impl); + static void bufferFuzz(const test::common::buffer::BufferFuzzTestCase& input); }; } // namespace Envoy diff --git a/test/common/buffer/buffer_fuzz_test.cc b/test/common/buffer/buffer_fuzz_test.cc index fa23082b4096..917428fd3844 100644 --- a/test/common/buffer/buffer_fuzz_test.cc +++ b/test/common/buffer/buffer_fuzz_test.cc @@ -6,7 +6,7 @@ namespace Envoy { // Fuzz the old owned buffer implementation. 
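// (Only one buffer implementation remains after this change, so this single fuzz target now
// covers it.)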
DEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) { - Envoy::BufferFuzz::bufferFuzz(input, true); + Envoy::BufferFuzz::bufferFuzz(input); } } // namespace Envoy diff --git a/test/common/buffer/buffer_test.cc b/test/common/buffer/buffer_test.cc index bfd3b83dcf7b..17a7239e87b0 100644 --- a/test/common/buffer/buffer_test.cc +++ b/test/common/buffer/buffer_test.cc @@ -283,15 +283,9 @@ TEST(SliceDequeTest, CreateDelete) { EXPECT_TRUE(slice3_deleted); } -class BufferHelperTest : public BufferImplementationParamTest {}; - -INSTANTIATE_TEST_SUITE_P(BufferHelperTest, BufferHelperTest, - testing::ValuesIn({BufferImplementation::Old, BufferImplementation::New})); - -TEST_P(BufferHelperTest, PeekI8) { +TEST(BufferHelperTest, PeekI8) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 0xFE}); EXPECT_EQ(buffer.peekInt(), 0); EXPECT_EQ(buffer.peekInt(0), 0); @@ -302,22 +296,19 @@ TEST_P(BufferHelperTest, PeekI8) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeByte(0); EXPECT_THROW_WITH_MESSAGE(buffer.peekInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekLEI16) { +TEST(BufferHelperTest, PeekLEI16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0100); EXPECT_EQ(buffer.peekLEInt(0), 0x0100); @@ -329,22 +320,19 @@ TEST_P(BufferHelperTest, PeekLEI16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekLEI32) { +TEST(BufferHelperTest, PeekLEI32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x03020100); EXPECT_EQ(buffer.peekLEInt(0), 0x03020100); @@ -355,22 +343,19 @@ TEST_P(BufferHelperTest, PeekLEI32) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekLEI64) { +TEST(BufferHelperTest, PeekLEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.peekLEInt(0), 0x0706050403020100); @@ -390,7 +375,6 @@ TEST_P(BufferHelperTest, PeekLEI64) { { // signed Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFE, 0xFF, 0xFF}); EXPECT_EQ((buffer.peekLEInt()), -1); EXPECT_EQ((buffer.peekLEInt(2)), 255); // 0x00FF @@ -400,7 +384,6 @@ TEST_P(BufferHelperTest, PeekLEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF}); EXPECT_THROW_WITH_MESSAGE( (buffer.peekLEInt(buffer.length() - sizeof(int64_t) + 1)), @@ -409,22 +392,19 @@ TEST_P(BufferHelperTest, PeekLEI64) { { Buffer::OwnedImpl buffer; - 
verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekLEU16) { +TEST(BufferHelperTest, PeekLEU16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0100); EXPECT_EQ(buffer.peekLEInt(0), 0x0100); @@ -435,22 +415,19 @@ TEST_P(BufferHelperTest, PeekLEU16) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekLEU32) { +TEST(BufferHelperTest, PeekLEU32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x03020100); EXPECT_EQ(buffer.peekLEInt(0), 0x03020100); @@ -461,22 +438,19 @@ TEST_P(BufferHelperTest, PeekLEU32) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekLEU64) { +TEST(BufferHelperTest, PeekLEU64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.peekLEInt(0), 0x0706050403020100); @@ -487,22 +461,19 @@ TEST_P(BufferHelperTest, PeekLEU64) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekLEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekBEI16) { +TEST(BufferHelperTest, PeekBEI16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 1); EXPECT_EQ(buffer.peekBEInt(0), 1); @@ -514,22 +485,19 @@ TEST_P(BufferHelperTest, PeekBEI16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekBEI32) { +TEST(BufferHelperTest, PeekBEI32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x00010203); EXPECT_EQ(buffer.peekBEInt(0), 0x00010203); @@ -540,22 +508,19 @@ TEST_P(BufferHelperTest, PeekBEI32) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), 
EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekBEI64) { +TEST(BufferHelperTest, PeekBEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.peekBEInt(0), 0x0001020304050607); @@ -574,7 +539,6 @@ TEST_P(BufferHelperTest, PeekBEI64) { { // signed Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFE}); EXPECT_EQ((buffer.peekBEInt()), -1); EXPECT_EQ((buffer.peekBEInt(2)), -256); // 0xFF00 @@ -584,7 +548,6 @@ TEST_P(BufferHelperTest, PeekBEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF}); EXPECT_THROW_WITH_MESSAGE( (buffer.peekBEInt(buffer.length() - sizeof(int64_t) + 1)), @@ -593,22 +556,19 @@ TEST_P(BufferHelperTest, PeekBEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekBEU16) { +TEST(BufferHelperTest, PeekBEU16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 1); EXPECT_EQ(buffer.peekBEInt(0), 1); @@ -619,22 +579,19 @@ TEST_P(BufferHelperTest, PeekBEU16) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 2, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekBEU32) { +TEST(BufferHelperTest, PeekBEU32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x00010203); EXPECT_EQ(buffer.peekBEInt(0), 0x00010203); @@ -645,22 +602,19 @@ TEST_P(BufferHelperTest, PeekBEU32) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 4, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, PeekBEU64) { +TEST(BufferHelperTest, PeekBEU64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.peekBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.peekBEInt(0), 0x0001020304050607); @@ -671,21 +625,18 @@ TEST_P(BufferHelperTest, PeekBEU64) { } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(0), EnvoyException, "buffer underflow"); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addRepeated(buffer, 8, 0); EXPECT_THROW_WITH_MESSAGE(buffer.peekBEInt(1), EnvoyException, "buffer underflow"); } } -TEST_P(BufferHelperTest, DrainI8) { +TEST(BufferHelperTest, DrainI8) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 0xFE}); EXPECT_EQ(buffer.drainInt(), 0); EXPECT_EQ(buffer.drainInt(), 1); @@ -693,9 +644,8 @@ 
TEST_P(BufferHelperTest, DrainI8) { EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainLEI16) { +TEST(BufferHelperTest, DrainLEI16) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x0100); EXPECT_EQ(buffer.drainLEInt(), 0x0302); @@ -703,45 +653,40 @@ TEST_P(BufferHelperTest, DrainLEI16) { EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainLEI32) { +TEST(BufferHelperTest, DrainLEI32) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x03020100); EXPECT_EQ(buffer.drainLEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainLEI64) { +TEST(BufferHelperTest, DrainLEI64) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.drainLEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainLEU32) { +TEST(BufferHelperTest, DrainLEU32) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x03020100); EXPECT_EQ(buffer.drainLEInt(), 0xFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainLEU64) { +TEST(BufferHelperTest, DrainLEU64) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainLEInt(), 0x0706050403020100); EXPECT_EQ(buffer.drainLEInt(), 0xFFFFFFFFFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainBEI16) { +TEST(BufferHelperTest, DrainBEI16) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 1); EXPECT_EQ(buffer.drainBEInt(), 0x0203); @@ -749,45 +694,40 @@ TEST_P(BufferHelperTest, DrainBEI16) { EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainBEI32) { +TEST(BufferHelperTest, DrainBEI32) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x00010203); EXPECT_EQ(buffer.drainBEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainBEI64) { +TEST(BufferHelperTest, DrainBEI64) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.drainBEInt(), -1); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainBEU32) { +TEST(BufferHelperTest, DrainBEU32) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x00010203); EXPECT_EQ(buffer.drainBEInt(), 0xFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, DrainBEU64) { +TEST(BufferHelperTest, DrainBEU64) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); addSeq(buffer, {0, 1, 2, 3, 4, 5, 6, 7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}); EXPECT_EQ(buffer.drainBEInt(), 0x0001020304050607); EXPECT_EQ(buffer.drainBEInt(), 0xFFFFFFFFFFFFFFFF); EXPECT_EQ(buffer.length(), 0); } -TEST_P(BufferHelperTest, WriteI8) { +TEST(BufferHelperTest, WriteI8) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); 
buffer.writeByte(-128); buffer.writeByte(-1); buffer.writeByte(0); @@ -797,269 +737,229 @@ TEST_P(BufferHelperTest, WriteI8) { EXPECT_EQ(std::string("\x80\xFF\0\x1\x7F", 5), buffer.toString()); } -TEST_P(BufferHelperTest, WriteLEI16) { +TEST(BufferHelperTest, WriteLEI16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\0\x80", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\x7F", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteLEU16) { +TEST(BufferHelperTest, WriteLEU16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\0\x80", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteLEI32) { +TEST(BufferHelperTest, WriteLEI32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\0\0\0\x80", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\x7F", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteLEU32) { +TEST(BufferHelperTest, WriteLEU32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\0\0\0\x80", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\xFF", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteLEI64) { +TEST(BufferHelperTest, WriteLEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\x80", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(1); EXPECT_EQ(std::string("\x1\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeLEInt(0); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; - 
verifyImplementation(buffer); buffer.writeLEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteBEI16) { +TEST(BufferHelperTest, WriteBEI16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\x80\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\x1", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\x7F\xFF", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteBEU16) { +TEST(BufferHelperTest, WriteBEU16) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\x1", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\x80\0", 2), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteBEI32) { +TEST(BufferHelperTest, WriteBEI32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\x80\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\0\0\x1", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\x7F\xFF\xFF\xFF", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteBEU32) { +TEST(BufferHelperTest, WriteBEU32) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\0\0\x1", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(static_cast(std::numeric_limits::max()) + 1); EXPECT_EQ(std::string("\x80\0\0\0", 4), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\xFF\xFF\xFF\xFF", buffer.toString()); } } -TEST_P(BufferHelperTest, WriteBEI64) { +TEST(BufferHelperTest, WriteBEI64) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(std::numeric_limits::min()); EXPECT_EQ(std::string("\x80\0\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(1); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\x1", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.writeBEInt(0); EXPECT_EQ(std::string("\0\0\0\0\0\0\0\0", 8), buffer.toString()); } { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); 
buffer.writeBEInt(std::numeric_limits::max()); EXPECT_EQ("\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF", buffer.toString()); } diff --git a/test/common/buffer/new_buffer_fuzz_test.cc b/test/common/buffer/new_buffer_fuzz_test.cc deleted file mode 100644 index bc8f71910526..000000000000 --- a/test/common/buffer/new_buffer_fuzz_test.cc +++ /dev/null @@ -1,12 +0,0 @@ -#include "test/common/buffer/buffer_fuzz.h" -#include "test/common/buffer/buffer_fuzz.pb.h" -#include "test/fuzz/fuzz_runner.h" - -namespace Envoy { - -// Fuzz the new owned buffer implementation. -DEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) { - Envoy::BufferFuzz::bufferFuzz(input, false); -} - -} // namespace Envoy diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 19e6daa06fb6..14fd7145e0ac 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -19,7 +19,7 @@ namespace Envoy { namespace Buffer { namespace { -class OwnedImplTest : public BufferImplementationParamTest { +class OwnedImplTest : public testing::Test { public: bool release_callback_called_ = false; @@ -36,14 +36,10 @@ class OwnedImplTest : public BufferImplementationParamTest { } }; -INSTANTIATE_TEST_SUITE_P(OwnedImplTest, OwnedImplTest, - testing::ValuesIn({BufferImplementation::Old, BufferImplementation::New})); - -TEST_P(OwnedImplTest, AddBufferFragmentNoCleanup) { +TEST_F(OwnedImplTest, AddBufferFragmentNoCleanup) { char input[] = "hello world"; BufferFragmentImpl frag(input, 11, nullptr); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.addBufferFragment(frag); EXPECT_EQ(11, buffer.length()); @@ -51,13 +47,12 @@ TEST_P(OwnedImplTest, AddBufferFragmentNoCleanup) { EXPECT_EQ(0, buffer.length()); } -TEST_P(OwnedImplTest, AddBufferFragmentWithCleanup) { +TEST_F(OwnedImplTest, AddBufferFragmentWithCleanup) { char input[] = "hello world"; BufferFragmentImpl frag(input, 11, [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.addBufferFragment(frag); EXPECT_EQ(11, buffer.length()); @@ -70,7 +65,7 @@ TEST_P(OwnedImplTest, AddBufferFragmentWithCleanup) { EXPECT_TRUE(release_callback_called_); } -TEST_P(OwnedImplTest, AddBufferFragmentDynamicAllocation) { +TEST_F(OwnedImplTest, AddBufferFragmentDynamicAllocation) { char input_stack[] = "hello world"; char* input = new char[11]; std::copy(input_stack, input_stack + 11, input); @@ -83,7 +78,6 @@ TEST_P(OwnedImplTest, AddBufferFragmentDynamicAllocation) { }); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.addBufferFragment(*frag); EXPECT_EQ(11, buffer.length()); @@ -96,14 +90,13 @@ TEST_P(OwnedImplTest, AddBufferFragmentDynamicAllocation) { EXPECT_TRUE(release_callback_called_); } -TEST_P(OwnedImplTest, AddOwnedBufferFragmentWithCleanup) { +TEST_F(OwnedImplTest, AddOwnedBufferFragmentWithCleanup) { char input[] = "hello world"; const size_t expected_length = sizeof(input) - 1; auto frag = OwnedBufferFragmentImpl::create( {input, expected_length}, [this](const OwnedBufferFragmentImpl*) { release_callback_called_ = true; }); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.addBufferFragment(*frag); EXPECT_EQ(expected_length, buffer.length()); @@ -118,7 +111,7 @@ TEST_P(OwnedImplTest, AddOwnedBufferFragmentWithCleanup) { } // Verify that OwnedBufferFragment work correctly when input buffer is allocated on the heap. 
-TEST_P(OwnedImplTest, AddOwnedBufferFragmentDynamicAllocation) { +TEST_F(OwnedImplTest, AddOwnedBufferFragmentDynamicAllocation) { char input_stack[] = "hello world"; const size_t expected_length = sizeof(input_stack) - 1; char* input = new char[expected_length]; @@ -133,7 +126,6 @@ TEST_P(OwnedImplTest, AddOwnedBufferFragmentDynamicAllocation) { .release(); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.addBufferFragment(*frag); EXPECT_EQ(expected_length, buffer.length()); @@ -147,11 +139,9 @@ TEST_P(OwnedImplTest, AddOwnedBufferFragmentDynamicAllocation) { EXPECT_TRUE(release_callback_called_); } -TEST_P(OwnedImplTest, Add) { +TEST_F(OwnedImplTest, Add) { const std::string string1 = "Hello, ", string2 = "World!"; Buffer::OwnedImpl buffer; - verifyImplementation(buffer); - buffer.add(string1); EXPECT_EQ(string1.size(), buffer.length()); EXPECT_EQ(string1, buffer.toString()); @@ -173,10 +163,9 @@ TEST_P(OwnedImplTest, Add) { EXPECT_EQ(string1 + string2 + big_suffix, buffer.toString()); } -TEST_P(OwnedImplTest, Prepend) { +TEST_F(OwnedImplTest, Prepend) { const std::string suffix = "World!", prefix = "Hello, "; Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.add(suffix); buffer.prepend(prefix); @@ -196,10 +185,9 @@ TEST_P(OwnedImplTest, Prepend) { EXPECT_EQ(big_prefix + prefix + suffix, buffer.toString()); } -TEST_P(OwnedImplTest, PrependToEmptyBuffer) { +TEST_F(OwnedImplTest, PrependToEmptyBuffer) { std::string data = "Hello, World!"; Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.prepend(data); EXPECT_EQ(data.size(), buffer.length()); @@ -211,13 +199,11 @@ TEST_P(OwnedImplTest, PrependToEmptyBuffer) { EXPECT_EQ(data, buffer.toString()); } -TEST_P(OwnedImplTest, PrependBuffer) { +TEST_F(OwnedImplTest, PrependBuffer) { std::string suffix = "World!", prefix = "Hello, "; Buffer::OwnedImpl buffer; - verifyImplementation(buffer); buffer.add(suffix); Buffer::OwnedImpl prefixBuffer; - verifyImplementation(buffer); prefixBuffer.add(prefix); buffer.prepend(prefixBuffer); @@ -227,12 +213,11 @@ TEST_P(OwnedImplTest, PrependBuffer) { EXPECT_EQ(0, prefixBuffer.length()); } -TEST_P(OwnedImplTest, Write) { +TEST_F(OwnedImplTest, Write) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); Network::IoSocketHandleImpl io_handle; buffer.add("example"); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{7, 0})); @@ -278,12 +263,11 @@ TEST_P(OwnedImplTest, Write) { EXPECT_EQ(0, buffer.length()); } -TEST_P(OwnedImplTest, Read) { +TEST_F(OwnedImplTest, Read) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); Buffer::OwnedImpl buffer; - verifyImplementation(buffer); Network::IoSocketHandleImpl io_handle; EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); Api::IoCallUint64Result result = buffer.read(io_handle, 100); @@ -309,7 +293,7 @@ TEST_P(OwnedImplTest, Read) { EXPECT_EQ(0, buffer.length()); } -TEST_P(OwnedImplTest, ReserveCommit) { +TEST_F(OwnedImplTest, ReserveCommit) { // This fragment will later be added to the buffer. It is declared in an enclosing scope to // ensure it is not destructed until after the buffer is. const std::string input = "Hello, world"; @@ -317,8 +301,6 @@ TEST_P(OwnedImplTest, ReserveCommit) { { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); - // A zero-byte reservation should fail. 
static constexpr uint64_t NumIovecs = 16; Buffer::RawSlice iovecs[NumIovecs]; @@ -336,12 +318,6 @@ TEST_P(OwnedImplTest, ReserveCommit) { commitReservation(iovecs, num_reserved, buffer); EXPECT_EQ(1, buffer.length()); - // The remaining tests validate internal optimizations of the new deque-of-slices - // implementation, so they're not valid for the old evbuffer implementation. - if (buffer.usesOldImpl()) { - return; - } - // Request a reservation that fits in the remaining space at the end of the last slice. num_reserved = buffer.reserve(1, iovecs, NumIovecs); EXPECT_EQ(1, num_reserved); @@ -391,10 +367,8 @@ TEST_P(OwnedImplTest, ReserveCommit) { } } -TEST_P(OwnedImplTest, ReserveCommitReuse) { +TEST_F(OwnedImplTest, ReserveCommitReuse) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); - static constexpr uint64_t NumIovecs = 2; Buffer::RawSlice iovecs[NumIovecs]; @@ -429,10 +403,8 @@ TEST_P(OwnedImplTest, ReserveCommitReuse) { EXPECT_EQ(second_slice, iovecs[1].mem_); } -TEST_P(OwnedImplTest, ReserveReuse) { +TEST_F(OwnedImplTest, ReserveReuse) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); - static constexpr uint64_t NumIovecs = 2; Buffer::RawSlice iovecs[NumIovecs]; @@ -454,12 +426,11 @@ TEST_P(OwnedImplTest, ReserveReuse) { EXPECT_EQ(second_slice, iovecs[1].mem_); } -TEST_P(OwnedImplTest, Search) { +TEST_F(OwnedImplTest, Search) { // Populate a buffer with a string split across many small slices, to // exercise edge cases in the search implementation. static const char* Inputs[] = {"ab", "a", "", "aaa", "b", "a", "aaa", "ab", "a"}; Buffer::OwnedImpl buffer; - verifyImplementation(buffer); for (const auto& input : Inputs) { buffer.appendSliceForTest(input); } @@ -482,12 +453,11 @@ TEST_P(OwnedImplTest, Search) { EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0)); } -TEST_P(OwnedImplTest, StartsWith) { +TEST_F(OwnedImplTest, StartsWith) { // Populate a buffer with a string split across many small slices, to // exercise edge cases in the startsWith implementation. static const char* Inputs[] = {"ab", "a", "", "aaa", "b", "a", "aaa", "ab", "a"}; Buffer::OwnedImpl buffer; - verifyImplementation(buffer); for (const auto& input : Inputs) { buffer.appendSliceForTest(input); } @@ -505,9 +475,8 @@ TEST_P(OwnedImplTest, StartsWith) { EXPECT_FALSE(buffer.startsWith({"ba", 2})); } -TEST_P(OwnedImplTest, ToString) { +TEST_F(OwnedImplTest, ToString) { Buffer::OwnedImpl buffer; - verifyImplementation(buffer); EXPECT_EQ("", buffer.toString()); auto append = [&buffer](absl::string_view str) { buffer.add(str.data(), str.size()); }; append("Hello, "); @@ -521,7 +490,7 @@ TEST_P(OwnedImplTest, ToString) { EXPECT_EQ(absl::StrCat("Hello, world!" + long_string), buffer.toString()); } -TEST_P(OwnedImplTest, AppendSliceForTest) { +TEST_F(OwnedImplTest, AppendSliceForTest) { static constexpr size_t NumInputs = 3; static constexpr const char* Inputs[] = {"one", "2", "", "four", ""}; Buffer::OwnedImpl buffer; @@ -547,7 +516,7 @@ TEST_P(OwnedImplTest, AppendSliceForTest) { // Regression test for oss-fuzz issue // https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13263, where prepending // an empty buffer resulted in a corrupted libevent internal state. 
-TEST_P(OwnedImplTest, PrependEmpty) { +TEST_F(OwnedImplTest, PrependEmpty) { Buffer::OwnedImpl buf; Buffer::OwnedImpl other_buf; char input[] = "foo"; @@ -564,7 +533,7 @@ TEST_P(OwnedImplTest, PrependEmpty) { // Regression test for oss-fuzz issues // https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14466, empty commit // following a reserve resulted in a corrupted libevent internal state. -TEST_P(OwnedImplTest, ReserveZeroCommit) { +TEST_F(OwnedImplTest, ReserveZeroCommit) { BufferFragmentImpl frag("", 0, nullptr); Buffer::OwnedImpl buf; buf.addBufferFragment(frag); diff --git a/test/common/buffer/utility.h b/test/common/buffer/utility.h index daf67d85c1d2..868427de465a 100644 --- a/test/common/buffer/utility.h +++ b/test/common/buffer/utility.h @@ -10,36 +10,6 @@ namespace Envoy { namespace Buffer { namespace { -/** Used to specify which OwnedImpl implementation to test. */ -enum class BufferImplementation { - Old, // original evbuffer-based version - New // new deque-of-slices version -}; - -/** - * Base class for tests that are parameterized based on BufferImplementation. - */ -class BufferImplementationParamTest : public testing::TestWithParam { -protected: - BufferImplementationParamTest() { - OwnedImpl::useOldImpl(GetParam() == BufferImplementation::Old); - } - - ~BufferImplementationParamTest() override = default; - - /** Verify that a buffer has been constructed using the expected implementation. */ - void verifyImplementation(const OwnedImpl& buffer) { - switch (GetParam()) { - case BufferImplementation::Old: - ASSERT_TRUE(buffer.usesOldImpl()); - break; - case BufferImplementation::New: - ASSERT_FALSE(buffer.usesOldImpl()); - break; - } - } -}; - inline void addRepeated(Buffer::Instance& buffer, int n, int8_t value) { for (int i = 0; i < n; i++) { buffer.add(&value, 1); diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 6d6f043c310d..c2cf8607ab30 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -14,12 +14,9 @@ namespace { const char TEN_BYTES[] = "0123456789"; -class WatermarkBufferTest : public BufferImplementationParamTest { +class WatermarkBufferTest : public testing::Test { public: - WatermarkBufferTest() { - verifyImplementation(buffer_); - buffer_.setWatermarks(5, 10); - } + WatermarkBufferTest() { buffer_.setWatermarks(5, 10); } Buffer::WatermarkBuffer buffer_{[&]() -> void { ++times_low_watermark_called_; }, [&]() -> void { ++times_high_watermark_called_; }}; @@ -27,12 +24,9 @@ class WatermarkBufferTest : public BufferImplementationParamTest { uint32_t times_high_watermark_called_{0}; }; -INSTANTIATE_TEST_SUITE_P(WatermarkBufferTest, WatermarkBufferTest, - testing::ValuesIn({BufferImplementation::Old, BufferImplementation::New})); - -TEST_P(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); } +TEST_F(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); } -TEST_P(WatermarkBufferTest, CopyOut) { +TEST_F(WatermarkBufferTest, CopyOut) { buffer_.add("hello world"); std::array out; buffer_.copyOut(0, out.size(), out.data()); @@ -45,7 +39,7 @@ TEST_P(WatermarkBufferTest, CopyOut) { buffer_.copyOut(4, 0, out.data()); } -TEST_P(WatermarkBufferTest, AddChar) { +TEST_F(WatermarkBufferTest, AddChar) { buffer_.add(TEN_BYTES, 10); EXPECT_EQ(0, times_high_watermark_called_); buffer_.add("a", 1); @@ -53,7 +47,7 @@ TEST_P(WatermarkBufferTest, AddChar) { EXPECT_EQ(11, buffer_.length()); } 
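All of the buffer test conversions in this patch follow the same mechanical pattern: the old/new implementation parameterization (the BufferImplementationParamTest base, the INSTANTIATE_TEST_SUITE_P lists, and the verifyImplementation() calls) is deleted, and each TEST_P becomes a plain TEST_F on a testing::Test fixture. As a rough, self-contained sketch of the resulting shape — using a hypothetical BufferLike class in place of Buffer::OwnedImpl, and with the integer width spelled out as an explicit template argument (e.g. writeBEInt<int16_t>), which is presumably what the calls above intend:

.. code-block:: cpp

  #include <cstdint>
  #include <limits>
  #include <string>

  #include "gtest/gtest.h"

  // Hypothetical stand-in for Buffer::OwnedImpl: appends the big-endian byte
  // representation of an integer to an internal string.
  class BufferLike {
  public:
    template <typename T> void writeBEInt(T value) {
      for (int shift = (sizeof(T) - 1) * 8; shift >= 0; shift -= 8) {
        data_.push_back(static_cast<char>((static_cast<uint64_t>(value) >> shift) & 0xFF));
      }
    }
    const std::string& toString() const { return data_; }

  private:
    std::string data_;
  };

  // With only one buffer implementation left, a plain fixture is enough; there is
  // no GetParam() and nothing to instantiate.
  class BufferHelperLikeTest : public testing::Test {};

  TEST_F(BufferHelperLikeTest, WriteBEI16) {
    BufferLike buffer;
    buffer.writeBEInt<int16_t>(std::numeric_limits<int16_t>::min());
    EXPECT_EQ(std::string("\x80\0", 2), buffer.toString());
    buffer = BufferLike();
    buffer.writeBEInt<int16_t>(std::numeric_limits<int16_t>::max());
    EXPECT_EQ("\x7F\xFF", buffer.toString());
  }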
-TEST_P(WatermarkBufferTest, AddString) { +TEST_F(WatermarkBufferTest, AddString) { buffer_.add(std::string(TEN_BYTES)); EXPECT_EQ(0, times_high_watermark_called_); buffer_.add(std::string("a")); @@ -61,7 +55,7 @@ TEST_P(WatermarkBufferTest, AddString) { EXPECT_EQ(11, buffer_.length()); } -TEST_P(WatermarkBufferTest, AddBuffer) { +TEST_F(WatermarkBufferTest, AddBuffer) { OwnedImpl first(TEN_BYTES); buffer_.add(first); EXPECT_EQ(0, times_high_watermark_called_); @@ -71,7 +65,7 @@ TEST_P(WatermarkBufferTest, AddBuffer) { EXPECT_EQ(11, buffer_.length()); } -TEST_P(WatermarkBufferTest, Prepend) { +TEST_F(WatermarkBufferTest, Prepend) { std::string suffix = "World!", prefix = "Hello, "; buffer_.add(suffix); @@ -81,7 +75,7 @@ TEST_P(WatermarkBufferTest, Prepend) { EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length()); } -TEST_P(WatermarkBufferTest, PrependToEmptyBuffer) { +TEST_F(WatermarkBufferTest, PrependToEmptyBuffer) { std::string suffix = "World!", prefix = "Hello, "; buffer_.prepend(suffix); @@ -97,7 +91,7 @@ TEST_P(WatermarkBufferTest, PrependToEmptyBuffer) { EXPECT_EQ(suffix.size() + prefix.size(), buffer_.length()); } -TEST_P(WatermarkBufferTest, PrependBuffer) { +TEST_F(WatermarkBufferTest, PrependBuffer) { std::string suffix = "World!", prefix = "Hello, "; uint32_t prefix_buffer_low_watermark_hits{0}; @@ -118,7 +112,7 @@ TEST_P(WatermarkBufferTest, PrependBuffer) { EXPECT_EQ(0, prefixBuffer.length()); } -TEST_P(WatermarkBufferTest, Commit) { +TEST_F(WatermarkBufferTest, Commit) { buffer_.add(TEN_BYTES, 10); EXPECT_EQ(0, times_high_watermark_called_); RawSlice out; @@ -130,7 +124,7 @@ TEST_P(WatermarkBufferTest, Commit) { EXPECT_EQ(20, buffer_.length()); } -TEST_P(WatermarkBufferTest, Drain) { +TEST_F(WatermarkBufferTest, Drain) { // Draining from above to below the low watermark does nothing if the high // watermark never got hit. 
buffer_.add(TEN_BYTES, 10); @@ -153,7 +147,7 @@ TEST_P(WatermarkBufferTest, Drain) { EXPECT_EQ(2, times_high_watermark_called_); } -TEST_P(WatermarkBufferTest, MoveFullBuffer) { +TEST_F(WatermarkBufferTest, MoveFullBuffer) { buffer_.add(TEN_BYTES, 10); OwnedImpl data("a"); @@ -163,7 +157,7 @@ TEST_P(WatermarkBufferTest, MoveFullBuffer) { EXPECT_EQ(11, buffer_.length()); } -TEST_P(WatermarkBufferTest, MoveOneByte) { +TEST_F(WatermarkBufferTest, MoveOneByte) { buffer_.add(TEN_BYTES, 9); OwnedImpl data("ab"); @@ -176,7 +170,7 @@ TEST_P(WatermarkBufferTest, MoveOneByte) { EXPECT_EQ(11, buffer_.length()); } -TEST_P(WatermarkBufferTest, WatermarkFdFunctions) { +TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { int pipe_fds[2] = {0, 0}; ASSERT_EQ(0, pipe(pipe_fds)); @@ -209,7 +203,7 @@ TEST_P(WatermarkBufferTest, WatermarkFdFunctions) { EXPECT_EQ(20, buffer_.length()); } -TEST_P(WatermarkBufferTest, MoveWatermarks) { +TEST_F(WatermarkBufferTest, MoveWatermarks) { buffer_.add(TEN_BYTES, 9); EXPECT_EQ(0, times_high_watermark_called_); buffer_.setWatermarks(1, 9); @@ -233,7 +227,7 @@ TEST_P(WatermarkBufferTest, MoveWatermarks) { EXPECT_EQ(2, times_low_watermark_called_); } -TEST_P(WatermarkBufferTest, GetRawSlices) { +TEST_F(WatermarkBufferTest, GetRawSlices) { buffer_.add(TEN_BYTES, 10); RawSlice slices[2]; @@ -245,7 +239,7 @@ TEST_P(WatermarkBufferTest, GetRawSlices) { EXPECT_EQ(data_pointer, slices[0].mem_); } -TEST_P(WatermarkBufferTest, Search) { +TEST_F(WatermarkBufferTest, Search) { buffer_.add(TEN_BYTES, 10); EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0)); @@ -253,7 +247,7 @@ TEST_P(WatermarkBufferTest, Search) { EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5)); } -TEST_P(WatermarkBufferTest, StartsWith) { +TEST_F(WatermarkBufferTest, StartsWith) { buffer_.add(TEN_BYTES, 10); EXPECT_TRUE(buffer_.startsWith({TEN_BYTES, 2})); @@ -261,7 +255,7 @@ TEST_P(WatermarkBufferTest, StartsWith) { EXPECT_FALSE(buffer_.startsWith({&TEN_BYTES[1], 2})); } -TEST_P(WatermarkBufferTest, MoveBackWithWatermarks) { +TEST_F(WatermarkBufferTest, MoveBackWithWatermarks) { int high_watermark_buffer1 = 0; int low_watermark_buffer1 = 0; Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, diff --git a/test/common/buffer/zero_copy_input_stream_test.cc b/test/common/buffer/zero_copy_input_stream_test.cc index bd747ed20a67..ffd55cf46fe6 100644 --- a/test/common/buffer/zero_copy_input_stream_test.cc +++ b/test/common/buffer/zero_copy_input_stream_test.cc @@ -9,11 +9,10 @@ namespace Envoy { namespace Buffer { namespace { -class ZeroCopyInputStreamTest : public BufferImplementationParamTest { +class ZeroCopyInputStreamTest : public testing::Test { public: ZeroCopyInputStreamTest() { Buffer::OwnedImpl buffer{"abcd"}; - verifyImplementation(buffer); stream_.move(buffer); } @@ -24,24 +23,21 @@ class ZeroCopyInputStreamTest : public BufferImplementationParamTest { int size_; }; -TEST_P(ZeroCopyInputStreamTest, Move) { +TEST_F(ZeroCopyInputStreamTest, Move) { Buffer::OwnedImpl buffer{"abcd"}; - verifyImplementation(buffer); stream_.move(buffer); EXPECT_EQ(0, buffer.length()); } -TEST_P(ZeroCopyInputStreamTest, Next) { +TEST_F(ZeroCopyInputStreamTest, Next) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, size_); EXPECT_EQ(0, memcmp(slice_data_.data(), data_, size_)); } -TEST_P(ZeroCopyInputStreamTest, TwoSlices) { +TEST_F(ZeroCopyInputStreamTest, TwoSlices) { Buffer::OwnedImpl buffer("efgh"); - verifyImplementation(buffer); - stream_.move(buffer); EXPECT_TRUE(stream_.Next(&data_, &size_)); 
@@ -52,7 +48,7 @@ TEST_P(ZeroCopyInputStreamTest, TwoSlices) { EXPECT_EQ(0, memcmp("efgh", data_, size_)); } -TEST_P(ZeroCopyInputStreamTest, BackUp) { +TEST_F(ZeroCopyInputStreamTest, BackUp) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, size_); @@ -65,7 +61,7 @@ TEST_P(ZeroCopyInputStreamTest, BackUp) { EXPECT_EQ(4, stream_.ByteCount()); } -TEST_P(ZeroCopyInputStreamTest, BackUpFull) { +TEST_F(ZeroCopyInputStreamTest, BackUpFull) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, size_); @@ -76,13 +72,13 @@ TEST_P(ZeroCopyInputStreamTest, BackUpFull) { EXPECT_EQ(4, stream_.ByteCount()); } -TEST_P(ZeroCopyInputStreamTest, ByteCount) { +TEST_F(ZeroCopyInputStreamTest, ByteCount) { EXPECT_EQ(0, stream_.ByteCount()); EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(4, stream_.ByteCount()); } -TEST_P(ZeroCopyInputStreamTest, Finish) { +TEST_F(ZeroCopyInputStreamTest, Finish) { EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_TRUE(stream_.Next(&data_, &size_)); EXPECT_EQ(0, size_); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 615a5273aed1..c11520f40022 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -733,16 +733,7 @@ TEST_F(QuicPlatformTest, TestQuicOptional) { EXPECT_EQ(1, *maybe_a); } -class QuicMemSliceTest : public Envoy::Buffer::BufferImplementationParamTest { -public: - ~QuicMemSliceTest() override = default; -}; - -INSTANTIATE_TEST_SUITE_P(QuicMemSliceTests, QuicMemSliceTest, - testing::ValuesIn({Envoy::Buffer::BufferImplementation::Old, - Envoy::Buffer::BufferImplementation::New})); - -TEST_P(QuicMemSliceTest, ConstructMemSliceFromBuffer) { +TEST(EnvoyQuicMemSliceTest, ConstructMemSliceFromBuffer) { std::string str(512, 'b'); // Fragment needs to out-live buffer. bool fragment_releaser_called = false; @@ -753,7 +744,6 @@ TEST_P(QuicMemSliceTest, ConstructMemSliceFromBuffer) { fragment_releaser_called = true; }); Envoy::Buffer::OwnedImpl buffer; - Envoy::Buffer::BufferImplementationParamTest::verifyImplementation(buffer); EXPECT_DEBUG_DEATH(quic::QuicMemSlice slice0{quic::QuicMemSliceImpl(buffer, 0)}, ""); std::string str2(1024, 'a'); // str2 is copied. 
@@ -781,9 +771,8 @@ TEST_P(QuicMemSliceTest, ConstructMemSliceFromBuffer) { EXPECT_TRUE(fragment_releaser_called); } -TEST_P(QuicMemSliceTest, ConstructQuicMemSliceSpan) { +TEST(EnvoyQuicMemSliceTest, ConstructQuicMemSliceSpan) { Envoy::Buffer::OwnedImpl buffer; - Envoy::Buffer::BufferImplementationParamTest::verifyImplementation(buffer); std::string str(1024, 'a'); buffer.add(str); quic::QuicMemSlice slice{quic::QuicMemSliceImpl(buffer, str.length())}; @@ -793,7 +782,7 @@ TEST_P(QuicMemSliceTest, ConstructQuicMemSliceSpan) { EXPECT_EQ(str, span.GetData(0)); } -TEST_P(QuicMemSliceTest, QuicMemSliceStorage) { +TEST(EnvoyQuicMemSliceTest, QuicMemSliceStorage) { std::string str(512, 'a'); struct iovec iov = {const_cast(str.data()), str.length()}; SimpleBufferAllocator allocator; diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 3fefa006932c..50853ff411cb 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -89,7 +89,6 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(hotRestartDisabled, bool()); MOCK_CONST_METHOD0(signalHandlingEnabled, bool()); MOCK_CONST_METHOD0(mutexTracingEnabled, bool()); - MOCK_CONST_METHOD0(libeventBufferEnabled, bool()); MOCK_CONST_METHOD0(fakeSymbolTableEnabled, bool()); MOCK_CONST_METHOD0(cpusetThreadsEnabled, bool()); MOCK_CONST_METHOD0(toCommandLineOptions, Server::CommandLineOptionsPtr()); diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 0a365db18544..e4b01924c021 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -93,7 +93,6 @@ TEST_F(OptionsImplTest, All) { EXPECT_EQ(std::chrono::seconds(60), options->drainTime()); EXPECT_EQ(std::chrono::seconds(90), options->parentShutdownTime()); EXPECT_TRUE(options->hotRestartDisabled()); - EXPECT_FALSE(options->libeventBufferEnabled()); EXPECT_TRUE(options->cpusetThreadsEnabled()); EXPECT_TRUE(options->allowUnknownStaticFields()); EXPECT_TRUE(options->rejectUnknownDynamicFields()); @@ -247,11 +246,10 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { // 1. version - default TCLAP argument. // 2. help - default TCLAP argument. // 3. ignore_rest - default TCLAP argument. - // 4. use-libevent-buffers - short-term override for rollout of new buffer implementation. - // 5. allow-unknown-fields - deprecated alias of allow-unknown-static-fields. - // 6. use-fake-symbol-table - short-term override for rollout of real symbol-table implementation. - // 7. hot restart version - print the hot restart version and exit. - EXPECT_EQ(options->count() - 7, command_line_options->GetDescriptor()->field_count()); + // 4. allow-unknown-fields - deprecated alias of allow-unknown-static-fields. + // 5. use-fake-symbol-table - short-term override for rollout of real symbol-table implementation. + // 6. hot restart version - print the hot restart version and exit. 
+ EXPECT_EQ(options->count() - 6, command_line_options->GetDescriptor()->field_count()); } TEST_F(OptionsImplTest, BadCliOption) { From 274f17cabe7f49f105c82919937ab710b9428261 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Sat, 9 Nov 2019 14:20:07 -0300 Subject: [PATCH 03/14] dynamic proxy: add support for header based routing (#8869) Signed-off-by: Raul Gutierrez Segales --- api/envoy/api/v2/route/route.proto | 4 +++ api/envoy/api/v3alpha/route/route.proto | 4 +++ .../v2alpha/dynamic_forward_proxy.proto | 31 +++++++++++++++--- .../v3alpha/dynamic_forward_proxy.proto | 32 ++++++++++++++++--- docs/root/intro/version_history.rst | 1 + .../dynamic_forward_proxy/proxy_filter.cc | 12 ++++++- .../http/dynamic_forward_proxy/proxy_filter.h | 2 ++ .../proxy_filter_test.cc | 26 +++++++++++++++ 8 files changed, 103 insertions(+), 9 deletions(-) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index 8244b0134d14..c28500e69efc 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -727,6 +727,8 @@ message RouteAction { oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with // this value. + // + // [#next-major-version: host_rewrite_literal] string host_rewrite = 6; // Indicates that during forwarding, the host header will be swapped with @@ -744,6 +746,8 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // [#next-major-version: host_rewrite_header] string auto_host_rewrite_header = 29; } diff --git a/api/envoy/api/v3alpha/route/route.proto b/api/envoy/api/v3alpha/route/route.proto index 8e0513d2b848..68c0911f9ecd 100644 --- a/api/envoy/api/v3alpha/route/route.proto +++ b/api/envoy/api/v3alpha/route/route.proto @@ -667,6 +667,8 @@ message RouteAction { oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with // this value. + // + // [#next-major-version: host_rewrite_literal] string host_rewrite = 6; // Indicates that during forwarding, the host header will be swapped with @@ -684,6 +686,8 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. + // + // [#next-major-version: host_rewrite_header] string auto_host_rewrite_header = 29; } diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 04fe17993184..dbe548c346b5 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -24,8 +24,31 @@ message FilterConfig { // Per route Configuration for the dynamic forward proxy HTTP filter. message PerRouteConfig { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - string host_rewrite = 1; + oneof host_rewrite_specifier { + // Indicates that before DNS lookup, the host header will be swapped with + // this value. If not set or empty, the original host header value + // will be used and no rewrite will happen. 
+ // + // Note: this rewrite affects both DNS lookup and host header forwarding. However, this + // option shouldn't be used with + // :ref:`HCM host rewrite ` given that the + // value set here would be used for DNS lookups whereas the value set in the HCM would be used + // for host header forwarding which is not the desired outcome. + // + // [#next-major-version: host_rewrite_literal] + string host_rewrite = 1; + + // Indicates that before DNS lookup, the host header will be swapped with + // the value of this header. If not set or empty, the original host header + // value will be used and no rewrite will happen. + // + // Note: this rewrite affects both DNS lookup and host header forwarding. However, this + // option shouldn't be used with + // :ref:`HCM host rewrite header ` + // given that the value set here would be used for DNS lookups whereas the value set in the HCM + // would be used for host header forwarding which is not the desired outcome. + // + // [#next-major-version: host_rewrite_header] + string auto_host_rewrite_header = 2; + } } diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto index 8ccb59dd1e3c..d63094f38832 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto @@ -24,8 +24,32 @@ message FilterConfig { // Per route Configuration for the dynamic forward proxy HTTP filter. message PerRouteConfig { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - string host_rewrite = 1; + oneof host_rewrite_specifier { + // Indicates that before DNS lookup, the host header will be swapped with + // this value. If not set or empty, the original host header value + // will be used and no rewrite will happen. + // + // Note: this rewrite affects both DNS lookup and host header forwarding. However, this + // option shouldn't be used with + // :ref:`HCM host rewrite ` given + // that the value set here would be used for DNS lookups whereas the value set in the HCM would + // be used for host header forwarding which is not the desired outcome. + // + // [#next-major-version: host_rewrite_literal] + string host_rewrite = 1; + + // Indicates that before DNS lookup, the host header will be swapped with + // the value of this header. If not set or empty, the original host header + // value will be used and no rewrite will happen. + // + // Note: this rewrite affects both DNS lookup and host header forwarding. However, this + // option shouldn't be used with + // :ref:`HCM host rewrite header + // ` given that the + // value set here would be used for DNS lookups whereas the value set in the HCM would be used + // for host header forwarding which is not the desired outcome. + // + // [#next-major-version: host_rewrite_header] + string auto_host_rewrite_header = 2; + } } diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 49174c77bf64..5d5b987841dc 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -7,6 +7,7 @@ Version history * api: remove all support for v1 * buffer: remove old implementation * build: official released binary is now built against libc++. 
+* http: support :ref:`auto_host_rewrite_header` in the dynamic forward proxy. * logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. * redis: performance improvement for larger split commands by avoiding string copies. * router: added support for REQ(header-name) :ref:`header formatter `. diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc index ae88762a542f..bbc9526df086 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc @@ -27,7 +27,8 @@ ProxyFilterConfig::ProxyFilterConfig( ProxyPerRouteConfig::ProxyPerRouteConfig( const envoy::config::filter::http::dynamic_forward_proxy::v2alpha::PerRouteConfig& config) - : host_rewrite_(config.host_rewrite()) {} + : host_rewrite_(config.host_rewrite()), + host_rewrite_header_(Http::LowerCaseString(config.auto_host_rewrite_header())) {} void ProxyFilter::onDestroy() { // Make sure we destroy any active cache load handle in case we are getting reset and deferred @@ -75,6 +76,15 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::HeaderMap& headers, b if (!host_rewrite.empty()) { headers.Host()->value(host_rewrite); } + + const auto& host_rewrite_header = config->hostRewriteHeader(); + if (!host_rewrite_header.get().empty()) { + const auto* header = headers.get(host_rewrite_header); + if (header != nullptr) { + const auto& header_value = header->value().getStringView(); + headers.Host()->value(header_value); + } + } } // See the comments in dns_cache.h for how loadDnsCacheEntry() handles hosts with embedded ports. diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h index e2e09d5970d9..25b905c59cc2 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.h @@ -35,9 +35,11 @@ class ProxyPerRouteConfig : public ::Envoy::Router::RouteSpecificFilterConfig { const envoy::config::filter::http::dynamic_forward_proxy::v2alpha::PerRouteConfig& config); const std::string& hostRewrite() const { return host_rewrite_; } + const Http::LowerCaseString& hostRewriteHeader() const { return host_rewrite_header_; } private: const std::string host_rewrite_; + const Http::LowerCaseString host_rewrite_header_; }; class ProxyFilter diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc index 5ac61ce3bb5f..521a14d55f73 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc @@ -195,6 +195,32 @@ TEST_F(ProxyFilterTest, HostRewrite) { filter_->onDestroy(); } +TEST_F(ProxyFilterTest, HostRewriteViaHeader) { + InSequence s; + + envoy::config::filter::http::dynamic_forward_proxy::v2alpha::PerRouteConfig proto_config; + proto_config.set_auto_host_rewrite_header("x-set-header"); + ProxyPerRouteConfig config(proto_config); + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); + Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = + new 
Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); + EXPECT_CALL(callbacks_.route_->route_entry_, + perFilterConfig(HttpFilterNames::get().DynamicForwardProxy)) + .WillOnce(Return(&config)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("bar:82"), 80, _)) + .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle})); + + Http::TestHeaderMapImpl headers{{":authority", "foo"}, {"x-set-header", "bar:82"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(headers, false)); + + EXPECT_CALL(*handle, onDestroy()); + filter_->onDestroy(); +} + } // namespace } // namespace DynamicForwardProxy } // namespace HttpFilters From 2c830f1aace9970e132440ade31f3c460f250d23 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Sun, 10 Nov 2019 00:21:26 +0700 Subject: [PATCH 04/14] ext_authz: Make sure `injectContext` is called (#8961) This patch makes sure active span `injectContext` is called before making the check request for the HTTP implementation of `ext_authz`. Signed-off-by: Dhi Aurrahman --- .../filters/common/ext_authz/ext_authz_http_impl.cc | 1 + .../common/ext_authz/ext_authz_http_impl_test.cc | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 02728b63e123..70bf6fff9d19 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -229,6 +229,7 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, callbacks_ = nullptr; span_ = nullptr; } else { + span_->injectContext(message->headers()); request_ = cm_.httpAsyncClientForCluster(cluster).send( std::move(message), *this, Http::AsyncClient::RequestOptions().setTimeout(config_->timeout())); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 7ea6341fee7a..9a8bd9b57c85 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -232,6 +232,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOk) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); client_.check(request_callbacks_, request, active_span_); @@ -257,6 +258,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); // Expect that header1 will be added and header2 correctly overwritten. 
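Stepping back to the dynamic forward proxy change for a moment: the new oneof means a route either pins the lookup host to a literal value (host_rewrite) or takes it from a request header (auto_host_rewrite_header), and whichever host wins is used both for the DNS cache lookup and for the forwarded Host header. A condensed, hypothetical sketch of that selection — not the actual filter code, and using a plain std::map for the headers — with the x-set-header name taken from the HostRewriteViaHeader test above:

.. code-block:: cpp

  #include <iostream>
  #include <map>
  #include <string>

  using HeaderMap = std::map<std::string, std::string>;

  // Simplified stand-in for ProxyPerRouteConfig: the proto fields live in a oneof,
  // so at most one of these two is non-empty.
  struct PerRouteConfig {
    std::string host_rewrite;        // literal replacement host
    std::string host_rewrite_header; // name of a request header carrying the host
  };

  // Condensed view of the decodeHeaders() logic: decide which host is used for both
  // the DNS cache lookup and the forwarded Host header.
  std::string effectiveHost(const PerRouteConfig& config, const HeaderMap& headers,
                            const std::string& original_authority) {
    if (!config.host_rewrite.empty()) {
      return config.host_rewrite;
    }
    if (!config.host_rewrite_header.empty()) {
      const auto it = headers.find(config.host_rewrite_header);
      if (it != headers.end() && !it->second.empty()) {
        return it->second;
      }
    }
    return original_authority; // no rewrite configured, or the header is absent/empty
  }

  int main() {
    PerRouteConfig config;
    config.host_rewrite_header = "x-set-header";
    const HeaderMap headers{{"x-set-header", "bar:82"}, {":authority", "foo"}};
    std::cout << effectiveHost(config, headers, "foo") << "\n"; // prints "bar:82"
    return 0;
  }

With the configuration and headers from that test, this returns "bar:82", which is why the test expects the DNS cache lookup for "bar:82" rather than for the original authority "foo".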
EXPECT_CALL(async_client_, send_(AllOf(ContainsPairAsHeader(config_->headersToAdd().front()), ContainsPairAsHeader(config_->headersToAdd().back())), @@ -286,6 +288,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); client_.check(request_callbacks_, request, active_span_); const auto check_response_headers = @@ -315,6 +318,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); @@ -337,6 +341,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); envoy::service::auth::v2::CheckRequest request; client_.check(request_callbacks_, request, active_span_); @@ -362,6 +367,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); envoy::service::auth::v2::CheckRequest request; client_.check(request_callbacks_, request, active_span_); @@ -385,6 +391,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); client_.check(request_callbacks_, request, active_span_); @@ -405,6 +412,8 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(request_callbacks_, @@ -425,6 +434,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestErrorParsingStatusCode) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); client_.check(request_callbacks_, request, active_span_); @@ -443,6 +453,7 @@ TEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) { EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + EXPECT_CALL(*child_span, injectContext(_)); EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); 
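The point of the repeated EXPECT_CALL(*child_span, injectContext(_)) additions is ordering: the child span's trace context has to be written into the check request's headers before the async HTTP client sends it, otherwise the authorization service cannot correlate its own spans with the caller's trace. A deliberately simplified sketch of that ordering — the span and header map here are hypothetical stand-ins, not Envoy's Tracing::Span or Http::HeaderMap:

.. code-block:: cpp

  #include <iostream>
  #include <map>
  #include <string>
  #include <utility>

  using HeaderMap = std::map<std::string, std::string>;

  // Hypothetical span: injectContext() writes propagation headers (here just a
  // trace id) into the request that is about to be sent upstream.
  class FakeSpan {
  public:
    explicit FakeSpan(std::string trace_id) : trace_id_(std::move(trace_id)) {}
    void injectContext(HeaderMap& headers) const { headers["x-trace-id"] = trace_id_; }

  private:
    std::string trace_id_;
  };

  // Hypothetical async client: whatever headers exist at send() time are exactly
  // what the authorization service will see.
  void send(const HeaderMap& headers) {
    for (const auto& header : headers) {
      std::cout << header.first << ": " << header.second << "\n";
    }
  }

  int main() {
    HeaderMap check_request_headers{{":method", "POST"}, {":path", "/check"}};
    FakeSpan child_span("abc123");

    // The fix in ext_authz_http_impl.cc amounts to making sure these two calls
    // happen in this order on the HTTP client path.
    child_span.injectContext(check_request_headers);
    send(check_request_headers);
    return 0;
  }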
client_.check(request_callbacks_, request, active_span_); From 4bcf677c9cb41db0dc7b679ef2a569ea7a627024 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Sun, 10 Nov 2019 18:03:16 -0800 Subject: [PATCH 05/14] ci: fix master clang-tidy run (#8965) Use TargetBranch for clang-tidy diff: https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml Signed-off-by: Lizan Zhou --- ci/run_clang_tidy.sh | 6 +++--- ci/run_envoy_docker.sh | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index e54a8e5bbae8..224beb70af2c 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -52,12 +52,12 @@ LLVM_PREFIX=$(llvm-config --prefix) if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then echo "Running full clang-tidy..." "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" -elif [[ -z "${CIRCLE_PR_NUMBER}" && "$CIRCLE_BRANCH" == "master" ]]; then - echo "On master branch, running clang-tidy-diff against previous commit..." +elif [[ "${BUILD_REASON}" != "PullRequest" ]]; then + echo "Running clang-tidy-diff against previous commit..." git diff HEAD^ | filter_excludes | "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" -p 1 else echo "Running clang-tidy-diff against master branch..." git fetch https://github.com/envoyproxy/envoy.git master - git diff $(git merge-base HEAD FETCH_HEAD)..HEAD | filter_excludes | \ + git diff "${SYSTEM_PULLREQUEST_TARGETBRANCH:-refs/heads/master}..HEAD" | filter_excludes | \ "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" -p 1 fi diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index a2eed32a5c34..f255826f8bc4 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -28,6 +28,7 @@ docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY= -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock ${GIT_VOLUME_OPTION} \ -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \ -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \ + -e SYSTEM_PULLREQUEST_TARGETBRANCH \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ --home-dir /source envoybuild && usermod -a -G pcap envoybuild && su envoybuild -c \"cd source && $*\"" From d55db09b713c7d32d4a071c72915638ae9e21e87 Mon Sep 17 00:00:00 2001 From: Venil Noronha Date: Sun, 10 Nov 2019 18:03:47 -0800 Subject: [PATCH 06/14] tests: enable crypto dependent tests (#8964) This fixes some crypto dependent tests. 
Signed-off-by: Venil Noronha --- test/common/config/BUILD | 7 +------ test/common/crypto/BUILD | 8 ++------ 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 95cedf72769c..7367f62e35bb 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -7,10 +7,6 @@ load( "envoy_package", "envoy_proto_library", ) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", -) envoy_package() @@ -320,10 +316,9 @@ envoy_cc_test( ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "datasource_test", srcs = ["datasource_test.cc"], - extension_name = "envoy.extensions.common.crypto.utility_lib", deps = [ "//source/common/common:empty_string", "//source/common/config:datasource_lib", diff --git a/test/common/crypto/BUILD b/test/common/crypto/BUILD index 54d113b70823..b1f7d592ace4 100644 --- a/test/common/crypto/BUILD +++ b/test/common/crypto/BUILD @@ -2,21 +2,17 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_test", "envoy_package", ) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", -) envoy_package() -envoy_extension_cc_test( +envoy_cc_test( name = "utility_test", srcs = [ "utility_test.cc", ], - extension_name = "envoy.extensions.common.crypto.utility_lib", external_deps = [ "ssl", ], From 76076fdd4c1a43e821375cd18054ab40aa7e3874 Mon Sep 17 00:00:00 2001 From: James Peach Date: Mon, 11 Nov 2019 13:05:23 +1100 Subject: [PATCH 07/14] Make the build setup script more robust. (#8960) If `ci/build_setup.sh` is killed at an inopportune time, it can have created the filter checkout directory but not cloned the git repository to it. This leads to hard-to-debug failures. The (partial) fix is to check for the git metadata directory before assuming it is OK. Signed-off-by: James Peach --- ci/build_setup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/build_setup.sh b/ci/build_setup.sh index e5950ca8570c..7450b1c58bf3 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -85,8 +85,8 @@ export BAZEL_BUILD_OPTIONS="--verbose_failures ${BAZEL_OPTIONS} --action_env=HOM if [ "$1" != "-nofetch" ]; then # Setup Envoy consuming project. - if [[ ! -a "${ENVOY_FILTER_EXAMPLE_SRCDIR}" ]] - then + if [[ ! -d "${ENVOY_FILTER_EXAMPLE_SRCDIR}/.git" ]]; then + rm -rf "${ENVOY_FILTER_EXAMPLE_SRCDIR}" git clone https://github.com/envoyproxy/envoy-filter-example.git "${ENVOY_FILTER_EXAMPLE_SRCDIR}" fi From 0e64a6c166dba1ad6a5d721e25a7462cd6f7fc91 Mon Sep 17 00:00:00 2001 From: Eric Lee Date: Sun, 10 Nov 2019 18:06:04 -0800 Subject: [PATCH 08/14] repair broken image reference (#8962) Signed-off-by: Eric Lee --- source/common/config/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/config/README.md b/source/common/config/README.md index 7774c91befbf..3135bd56bdaa 100644 --- a/source/common/config/README.md +++ b/source/common/config/README.md @@ -41,7 +41,7 @@ delta-specific logic; its GrpxMuxImpl implementation (TODO will be merged into N and has SotW-specific logic. Both the delta and SotW Subscription implementations (TODO will be merged) hold a shared_ptr. The shared_ptr allows for both non- and aggregated: if non-aggregated, you'll be the only holder of that shared_ptr. 
-![xDS_code_diagram_june2019](xDS_code_diagram_june2019.png) +![xDS_code_diagram](xDS_code_diagram.png) Note that the orange flow does not necessarily have to happen in response to the blue flow; there can be spontaneous updates. ACKs are not shown in this diagram; they are also carred by the [Delta]DiscoveryRequest protos. What does GrpcXdsContext even do in this diagram? Just own things and pass through function calls? Answer: it sequences the requests and ACKs that the various type_urls send. From 2d859aab9bb36223a4fade0f5af71834796b707d Mon Sep 17 00:00:00 2001 From: John Plevyak Date: Mon, 11 Nov 2019 08:32:44 -0800 Subject: [PATCH 09/14] Fix TSAN warning from unlocked cross-thread read. (#8957) Signed-off-by: John Plevyak --- source/server/drain_manager_impl.cc | 4 +++- source/server/drain_manager_impl.h | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index 852452da10ab..03dbf548c265 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -28,7 +28,7 @@ bool DrainManagerImpl::drainClose() const { return true; } - if (!draining()) { + if (!draining_) { return false; } @@ -51,7 +51,9 @@ void DrainManagerImpl::drainSequenceTick() { void DrainManagerImpl::startDrainSequence(std::function completion) { drain_sequence_completion_ = completion; + ASSERT(!draining_); ASSERT(!drain_tick_timer_); + draining_ = true; drain_tick_timer_ = server_.dispatcher().createTimer([this]() -> void { drainSequenceTick(); }); drainSequenceTick(); } diff --git a/source/server/drain_manager_impl.h b/source/server/drain_manager_impl.h index 4182edb9e843..8f44ad1cf99a 100644 --- a/source/server/drain_manager_impl.h +++ b/source/server/drain_manager_impl.h @@ -27,12 +27,12 @@ class DrainManagerImpl : Logger::Loggable, public DrainManager void startParentShutdownSequence() override; private: - bool draining() const { return drain_tick_timer_ != nullptr; } void drainSequenceTick(); Instance& server_; const envoy::api::v2::Listener::DrainType drain_type_; Event::TimerPtr drain_tick_timer_; + std::atomic draining_{false}; std::atomic drain_time_completed_{}; Event::TimerPtr parent_shutdown_timer_; std::function drain_sequence_completion_; From 766f3fb8dbdafce402631c43c16fda46ed003462 Mon Sep 17 00:00:00 2001 From: Steve Larkin Date: Mon, 11 Nov 2019 18:49:56 +0100 Subject: [PATCH 10/14] ext_authz: Set the peer's certificate in the source attributes (#8327) Set the downstream client X.509 certificate in the source Peer AttributeContext Risk Level: low Testing: Tests updated and extended. Docs Changes: New API additions are documented. Release Notes: Added. 
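The drain manager fix above replaces the old draining() accessor — which inferred state from drain_tick_timer_, a plain pointer written on the main thread and read from worker threads — with a std::atomic<bool> that is set exactly once when the drain sequence starts. A minimal, self-contained sketch of that cross-thread flag pattern (not the Envoy classes) looks like this:

.. code-block:: cpp

  #include <atomic>
  #include <chrono>
  #include <iostream>
  #include <thread>

  // Simplified stand-in for DrainManagerImpl: the main thread starts the drain
  // sequence once, worker threads poll the flag concurrently.
  class DrainFlag {
  public:
    void startDrainSequence() { draining_ = true; } // written once, main thread only

    bool drainClose() const { return draining_; } // read from any worker thread

  private:
    std::atomic<bool> draining_{false};
  };

  int main() {
    DrainFlag drain;
    std::thread worker([&drain] {
      while (!drain.drainClose()) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      }
      std::cout << "worker observed drain\n";
    });
    drain.startDrainSequence();
    worker.join();
    return 0;
  }

Because the flag is a std::atomic, the unlocked read in drainClose() is well-defined, which is exactly the data race TSAN was reporting against the previous pointer-based check.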
Fixes #8326 Signed-off-by: Steve Larkin --- .../filter/http/ext_authz/v2/ext_authz.proto | 8 ++- .../http/ext_authz/v3alpha/ext_authz.proto | 8 ++- .../network/ext_authz/v2/ext_authz.proto | 6 ++ .../network/ext_authz/v3alpha/ext_authz.proto | 6 ++ .../service/auth/v2/attribute_context.proto | 5 ++ .../auth/v3alpha/attribute_context.proto | 5 ++ .../http/http_filters/ext_authz_filter.rst | 2 + .../network_filters/ext_authz_filter.rst | 1 + docs/root/intro/version_history.rst | 1 + .../common/ext_authz/check_request_utils.cc | 26 +++++--- .../common/ext_authz/check_request_utils.h | 9 ++- .../filters/http/ext_authz/ext_authz.cc | 2 +- .../filters/http/ext_authz/ext_authz.h | 5 ++ .../filters/network/ext_authz/ext_authz.cc | 3 +- .../filters/network/ext_authz/ext_authz.h | 5 +- .../ext_authz/check_request_utils_test.cc | 66 +++++++++++++++---- 16 files changed, 130 insertions(+), 28 deletions(-) diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index 475b354f658c..0f70acfc076e 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. -// [#next-free-field: 10] +// [#next-free-field: 11] message ExtAuthz { // External authorization service configuration. oneof services { @@ -90,6 +90,12 @@ message ExtAuthz { // // If this field is not specified, the filter will be enabled for all requests. api.v2.core.RuntimeFractionalPercent filter_enabled = 9; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; } // Configuration for buffering the request data. diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto index 615887e3ed8f..c884f6ebe852 100644 --- a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. -// [#next-free-field: 10] +// [#next-free-field: 11] message ExtAuthz { reserved 4; @@ -88,6 +88,12 @@ message ExtAuthz { // // If this field is not specified, the filter will be enabled for all requests. api.v3alpha.core.RuntimeFractionalPercent filter_enabled = 9; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; } // Configuration for buffering the request data. diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index bc9ed5d51ca4..9b8e2b7a7a02 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -31,4 +31,10 @@ message ExtAuthz { // communication failure between authorization service and the proxy. // Defaults to false. bool failure_mode_allow = 3; + + // Specifies if the peer certificate is sent to the external service. 
+ // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 4; } diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto index 574fd170da18..97c6e4d45075 100644 --- a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto @@ -31,4 +31,10 @@ message ExtAuthz { // communication failure between authorization service and the proxy. // Defaults to false. bool failure_mode_allow = 3; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 4; } diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index 6fd24ea13a02..a694b13763f0 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -39,6 +39,7 @@ message AttributeContext { // The node can be either a service or an application that sends, forwards, // or receives the request. Service peers should fill in the `service`, // `principal`, and `labels` as appropriate. + // [#next-free-field: 6] message Peer { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. @@ -66,6 +67,10 @@ message AttributeContext { // * SPIFFE format is `spiffe://trust-domain/path` // * Google account format is `https://accounts.google.com/{userid}` string principal = 4; + + // The X.509 certificate used to authenticate the identify of this peer. + // When present, the certificate contents are encoded in URL and PEM format. + string certificate = 5; } // Represents a network request, such as an HTTP request. diff --git a/api/envoy/service/auth/v3alpha/attribute_context.proto b/api/envoy/service/auth/v3alpha/attribute_context.proto index 24214aa2ef27..22e059598b93 100644 --- a/api/envoy/service/auth/v3alpha/attribute_context.proto +++ b/api/envoy/service/auth/v3alpha/attribute_context.proto @@ -39,6 +39,7 @@ message AttributeContext { // The node can be either a service or an application that sends, forwards, // or receives the request. Service peers should fill in the `service`, // `principal`, and `labels` as appropriate. + // [#next-free-field: 6] message Peer { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. @@ -66,6 +67,10 @@ message AttributeContext { // * SPIFFE format is `spiffe://trust-domain/path` // * Google account format is `https://accounts.google.com/{userid}` string principal = 4; + + // The X.509 certificate used to authenticate the identify of this peer. + // When present, the certificate contents are encoded in URL and PEM format. + string certificate = 5; } // Represents a network request, such as an HTTP request. diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index 701ce520d163..3f2c16712359 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -37,6 +37,7 @@ A sample filter configuration for a gRPC authorization server: # Default is 200ms; override if your server needs e.g. warmup time. 
timeout: 0.5s + include_peer_certificate: true .. code-block:: yaml @@ -71,6 +72,7 @@ A sample filter configuration for a raw HTTP authorization server: cluster: ext-authz timeout: 0.25s failure_mode_allow: false + include_peer_certificate: true .. code-block:: yaml diff --git a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst index c0702ae02671..222954df29f9 100644 --- a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst +++ b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst @@ -37,6 +37,7 @@ A sample filter configuration could be: grpc_service: envoy_grpc: cluster_name: ext-authz + include_peer_certificate: true clusters: - name: ext-authz diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 5d5b987841dc..2641d8160f99 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -7,6 +7,7 @@ Version history * api: remove all support for v1 * buffer: remove old implementation * build: official released binary is now built against libc++. +* ext_authz: added :ref:`configurable ability` to send the :ref:`certificate` to the `ext_authz` service. * http: support :ref:`auto_host_rewrite_header` in the dynamic forward proxy. * logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. * redis: performance improvement for larger split commands by avoiding string copies. diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index b29c39708476..d9cb13eafd4b 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -27,7 +27,8 @@ namespace ExtAuthz { void CheckRequestUtils::setAttrContextPeer(envoy::service::auth::v2::AttributeContext_Peer& peer, const Network::Connection& connection, - const std::string& service, const bool local) { + const std::string& service, const bool local, + bool include_certificate) { // Set the address auto addr = peer.mutable_address(); @@ -38,7 +39,7 @@ void CheckRequestUtils::setAttrContextPeer(envoy::service::auth::v2::AttributeCo } // Set the principal. Preferably the URI SAN, DNS SAN or Subject in that order from the peer's - // cert. + // cert. Include the X.509 certificate of the source peer, if configured to do so. 
auto ssl = connection.ssl(); if (ssl != nullptr) { if (local) { @@ -65,6 +66,9 @@ void CheckRequestUtils::setAttrContextPeer(envoy::service::auth::v2::AttributeCo } else { peer.set_principal(uri_sans[0]); } + if (include_certificate) { + peer.set_certificate(ssl->urlEncodedPemEncodedPeerCertificate()); + } } } @@ -153,7 +157,8 @@ void CheckRequestUtils::createHttpCheck( const Envoy::Http::HeaderMap& headers, Protobuf::Map&& context_extensions, envoy::api::v2::core::Metadata&& metadata_context, - envoy::service::auth::v2::CheckRequest& request, uint64_t max_request_bytes) { + envoy::service::auth::v2::CheckRequest& request, uint64_t max_request_bytes, + bool include_peer_certificate) { auto attrs = request.mutable_attributes(); @@ -162,8 +167,10 @@ void CheckRequestUtils::createHttpCheck( const std::string service = getHeaderStr(headers.EnvoyDownstreamServiceCluster()); - setAttrContextPeer(*attrs->mutable_source(), *cb->connection(), service, false); - setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), "", true); + setAttrContextPeer(*attrs->mutable_source(), *cb->connection(), service, false, + include_peer_certificate); + setAttrContextPeer(*attrs->mutable_destination(), *cb->connection(), "", true, + include_peer_certificate); setAttrContextRequest(*attrs->mutable_request(), callbacks, headers, max_request_bytes); // Fill in the context extensions: @@ -172,13 +179,16 @@ void CheckRequestUtils::createHttpCheck( } void CheckRequestUtils::createTcpCheck(const Network::ReadFilterCallbacks* callbacks, - envoy::service::auth::v2::CheckRequest& request) { + envoy::service::auth::v2::CheckRequest& request, + bool include_peer_certificate) { auto attrs = request.mutable_attributes(); Network::ReadFilterCallbacks* cb = const_cast(callbacks); - setAttrContextPeer(*attrs->mutable_source(), cb->connection(), "", false); - setAttrContextPeer(*attrs->mutable_destination(), cb->connection(), "", true); + setAttrContextPeer(*attrs->mutable_source(), cb->connection(), "", false, + include_peer_certificate); + setAttrContextPeer(*attrs->mutable_destination(), cb->connection(), "", true, + include_peer_certificate); } } // namespace ExtAuthz diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.h b/source/extensions/filters/common/ext_authz/check_request_utils.h index 6f90d8d86b1a..7e27cd8189ce 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.h +++ b/source/extensions/filters/common/ext_authz/check_request_utils.h @@ -44,27 +44,30 @@ class CheckRequestUtils { * check request. * @param request is the reference to the check request that will be filled up. * @param with_request_body when true, will add the request body to the check request. + * @param include_peer_certificate whether to include the peer certificate in the check request. */ static void createHttpCheck(const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, const Envoy::Http::HeaderMap& headers, Protobuf::Map&& context_extensions, envoy::api::v2::core::Metadata&& metadata_context, envoy::service::auth::v2::CheckRequest& request, - uint64_t max_request_bytes); + uint64_t max_request_bytes, bool include_peer_certificate); /** * createTcpCheck is used to extract the attributes from the network layer and fill them up * in the CheckRequest proto message. * @param callbacks supplies the network layer context from which data can be extracted. * @param request is the reference to the check request that will be filled up. 
+ * @param include_peer_certificate whether to include the peer certificate in the check request. */ static void createTcpCheck(const Network::ReadFilterCallbacks* callbacks, - envoy::service::auth::v2::CheckRequest& request); + envoy::service::auth::v2::CheckRequest& request, + bool include_peer_certificate); private: static void setAttrContextPeer(envoy::service::auth::v2::AttributeContext_Peer& peer, const Network::Connection& connection, const std::string& service, - const bool local); + const bool local, bool include_certificate); static void setHttpRequest(::envoy::service::auth::v2::AttributeContext_HttpRequest& httpreq, const Envoy::Http::StreamDecoderFilterCallbacks* callbacks, const Envoy::Http::HeaderMap& headers, uint64_t max_request_bytes); diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 08de5ff948f0..36b3ae6c4ac3 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -75,7 +75,7 @@ void Filter::initiateCall(const Http::HeaderMap& headers) { Filters::Common::ExtAuthz::CheckRequestUtils::createHttpCheck( callbacks_, headers, std::move(context_extensions), std::move(metadata_context), - check_request_, config_->maxRequestBytes()); + check_request_, config_->maxRequestBytes(), config_->includePeerCertificate()); ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *callbacks_); state_ = State::Calling; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index c164eceab76f..7d1a9f40c010 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -74,6 +74,7 @@ class FilterConfig { pool_(scope_.symbolTable()), metadata_context_namespaces_(config.metadata_context_namespaces().begin(), config.metadata_context_namespaces().end()), + include_peer_certificate_(config.include_peer_certificate()), stats_(generateStats(stats_prefix, scope)), ext_authz_ok_(pool_.add("ext_authz.ok")), ext_authz_denied_(pool_.add("ext_authz.denied")), ext_authz_error_(pool_.add("ext_authz.error")), @@ -111,6 +112,8 @@ class FilterConfig { scope.counterFromStatName(name).inc(); } + bool includePeerCertificate() const { return include_peer_certificate_; } + private: static Http::Code toErrorCode(uint64_t status) { const auto code = static_cast(status); @@ -142,6 +145,8 @@ class FilterConfig { const std::vector metadata_context_namespaces_; + const bool include_peer_certificate_; + // The stats for the filter. 
ExtAuthzFilterStats stats_; diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index b3b37c02ec7a..cf4a8a4ab189 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -20,7 +20,8 @@ InstanceStats Config::generateStats(const std::string& name, Stats::Scope& scope } void Filter::callCheck() { - Filters::Common::ExtAuthz::CheckRequestUtils::createTcpCheck(filter_callbacks_, check_request_); + Filters::Common::ExtAuthz::CheckRequestUtils::createTcpCheck(filter_callbacks_, check_request_, + config_->includePeerCertificate()); status_ = Status::Calling; config_->stats().active_.inc(); diff --git a/source/extensions/filters/network/ext_authz/ext_authz.h b/source/extensions/filters/network/ext_authz/ext_authz.h index a0a963750be1..80743c678212 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.h +++ b/source/extensions/filters/network/ext_authz/ext_authz.h @@ -47,16 +47,19 @@ class Config { public: Config(const envoy::config::filter::network::ext_authz::v2::ExtAuthz& config, Stats::Scope& scope) : stats_(generateStats(config.stat_prefix(), scope)), - failure_mode_allow_(config.failure_mode_allow()) {} + failure_mode_allow_(config.failure_mode_allow()), + include_peer_certificate_(config.include_peer_certificate()) {} const InstanceStats& stats() { return stats_; } bool failureModeAllow() const { return failure_mode_allow_; } void setFailModeAllow(bool value) { failure_mode_allow_ = value; } + bool includePeerCertificate() const { return include_peer_certificate_; } private: static InstanceStats generateStats(const std::string& name, Stats::Scope& scope); const InstanceStats stats_; bool failure_mode_allow_; + const bool include_peer_certificate_; }; using ConfigSharedPtr = std::shared_ptr; diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index de4ef025784e..60fc785c7e6c 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -42,7 +42,7 @@ class CheckRequestUtilsTest : public testing::Test { EXPECT_CALL(req_info_, protocol()).Times(2).WillRepeatedly(ReturnPointee(&protocol_)); } - void callHttpCheckAndValidateRequestAttributes() { + void callHttpCheckAndValidateRequestAttributes(bool include_peer_certificate) { Http::TestHeaderMapImpl request_headers{{"x-envoy-downstream-service-cluster", "foo"}, {":path", "/bar"}}; envoy::service::auth::v2::CheckRequest request; @@ -54,7 +54,8 @@ class CheckRequestUtilsTest : public testing::Test { (*metadata_context.mutable_filter_metadata())["meta.key"] = metadata_val; CheckRequestUtils::createHttpCheck(&callbacks_, request_headers, std::move(context_extensions), - std::move(metadata_context), request, false); + std::move(metadata_context), request, false, + include_peer_certificate); EXPECT_EQ("source", request.attributes().source().principal()); EXPECT_EQ("destination", request.attributes().destination().principal()); @@ -67,6 +68,12 @@ class CheckRequestUtilsTest : public testing::Test { .fields() .at("foo") .string_value()); + + if (include_peer_certificate) { + EXPECT_EQ(cert_data_, request.attributes().source().certificate()); + } else { + EXPECT_EQ(0, request.attributes().source().certificate().size()); + } } static Buffer::InstancePtr newTestBuffer(uint64_t size) { @@ -88,9 
+95,11 @@ class CheckRequestUtilsTest : public testing::Test { std::shared_ptr> ssl_; NiceMock req_info_; Buffer::InstancePtr buffer_; + const std::string cert_data_{"cert-data"}; }; // Verify that createTcpCheck's dependencies are invoked when it's called. +// Verify that the source certificate is not set by default. TEST_F(CheckRequestUtilsTest, BasicTcp) { envoy::service::auth::v2::CheckRequest request; EXPECT_CALL(net_callbacks_, connection()).Times(2).WillRepeatedly(ReturnRef(connection_)); @@ -101,7 +110,27 @@ TEST_F(CheckRequestUtilsTest, BasicTcp) { EXPECT_CALL(*ssl_, uriSanLocalCertificate()) .WillOnce(Return(std::vector{"destination"})); - CheckRequestUtils::createTcpCheck(&net_callbacks_, request); + CheckRequestUtils::createTcpCheck(&net_callbacks_, request, false); + + EXPECT_EQ(request.attributes().source().certificate().size(), 0); +} + +// Verify that createTcpCheck's dependencies are invoked when it's called. +// Verify that createTcpCheck populates the source certificate correctly. +TEST_F(CheckRequestUtilsTest, TcpPeerCertificate) { + envoy::service::auth::v2::CheckRequest request; + EXPECT_CALL(net_callbacks_, connection()).Times(2).WillRepeatedly(ReturnRef(connection_)); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_)); + EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); + EXPECT_CALL(*ssl_, uriSanLocalCertificate()) + .WillOnce(Return(std::vector{"destination"})); + EXPECT_CALL(*ssl_, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(cert_data_)); + + CheckRequestUtils::createTcpCheck(&net_callbacks_, request, true); + + EXPECT_EQ(cert_data_, request.attributes().source().certificate()); } // Verify that createHttpCheck's dependencies are invoked when it's called. 
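For context, the sketch below shows how an external authorization server might consume the new field. It assumes the generated C++ classes from envoy/service/auth/v2/external_auth.proto; the class name, include path, and handler body are illustrative and not part of this patch. The accessor request->attributes().source().certificate() is the one exercised by the tests above, and the value is the URL-encoded PEM produced by urlEncodedPemEncodedPeerCertificate(), so a real server would URL-decode it before parsing the PEM.

#include <grpcpp/grpcpp.h>

#include "envoy/service/auth/v2/external_auth.grpc.pb.h"

// Minimal ext_authz server sketch: inspect the downstream peer certificate
// that Envoy forwards when include_peer_certificate is enabled.
class ExampleAuthServer final : public envoy::service::auth::v2::Authorization::Service {
public:
  grpc::Status Check(grpc::ServerContext* /*context*/,
                     const envoy::service::auth::v2::CheckRequest* request,
                     envoy::service::auth::v2::CheckResponse* response) override {
    // Empty unless the filter sets include_peer_certificate and the downstream
    // connection presented a client certificate.
    const std::string& url_encoded_pem = request->attributes().source().certificate();
    // A non-Ok code in the response status is treated as a denial by the filter.
    response->mutable_status()->set_code(url_encoded_pem.empty()
                                             ? grpc::StatusCode::PERMISSION_DENIED
                                             : grpc::StatusCode::OK);
    return grpc::Status::OK;
  }
};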
@@ -121,7 +150,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttp) { expectBasicHttp(); CheckRequestUtils::createHttpCheck(&callbacks_, request_headers, Protobuf::Map(), - envoy::api::v2::core::Metadata(), request_, size); + envoy::api::v2::core::Metadata(), request_, size, false); ASSERT_EQ(size, request_.attributes().request().http().body().size()); EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); EXPECT_EQ(request_.attributes().request().http().headers().end(), @@ -141,7 +170,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { expectBasicHttp(); CheckRequestUtils::createHttpCheck(&callbacks_, headers_, Protobuf::Map(), - envoy::api::v2::core::Metadata(), request_, size); + envoy::api::v2::core::Metadata(), request_, size, false); ASSERT_EQ(size, request_.attributes().request().http().body().size()); EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); EXPECT_EQ("true", request_.attributes().request().http().headers().at( @@ -157,9 +186,9 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) { EXPECT_CALL(*ssl_, uriSanLocalCertificate()) .WillOnce(Return(std::vector{"destination"})); expectBasicHttp(); - CheckRequestUtils::createHttpCheck(&callbacks_, headers_, - Protobuf::Map(), - envoy::api::v2::core::Metadata(), request_, buffer_->length()); + CheckRequestUtils::createHttpCheck( + &callbacks_, headers_, Protobuf::Map(), + envoy::api::v2::core::Metadata(), request_, buffer_->length(), false); ASSERT_EQ(buffer_->length(), request_.attributes().request().http().body().size()); EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()), request_.attributes().request().http().body()); @@ -169,6 +198,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) { // Verify that createHttpCheck extract the proper attributes from the http request into CheckRequest // proto object. +// Verify that the source certificate is not set by default. 
TEST_F(CheckRequestUtilsTest, CheckAttrContextPeer) { Http::TestHeaderMapImpl request_headers{{"x-envoy-downstream-service-cluster", "foo"}, {":path", "/bar"}}; @@ -185,7 +215,7 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextPeer) { EXPECT_CALL(*ssl_, uriSanLocalCertificate()) .WillOnce(Return(std::vector{"destination"})); - callHttpCheckAndValidateRequestAttributes(); + callHttpCheckAndValidateRequestAttributes(false); } // Verify that createHttpCheck extract the attributes from the HTTP request into CheckRequest @@ -197,7 +227,7 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextPeerUriSans) { EXPECT_CALL(*ssl_, uriSanLocalCertificate()) .WillOnce(Return(std::vector{"destination"})); - callHttpCheckAndValidateRequestAttributes(); + callHttpCheckAndValidateRequestAttributes(false); } // Verify that createHttpCheck extract the attributes from the HTTP request into CheckRequest @@ -215,7 +245,7 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextPeerDnsSans) { Protobuf::Map context_extensions; context_extensions["key"] = "value"; - callHttpCheckAndValidateRequestAttributes(); + callHttpCheckAndValidateRequestAttributes(false); } // Verify that createHttpCheck extract the attributes from the HTTP request into CheckRequest @@ -233,7 +263,19 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextSubject) { std::string subject_local = "destination"; EXPECT_CALL(*ssl_, subjectLocalCertificate()).WillOnce(ReturnRef(subject_local)); - callHttpCheckAndValidateRequestAttributes(); + callHttpCheckAndValidateRequestAttributes(false); +} + +// Verify that the source certificate is populated correctly. +TEST_F(CheckRequestUtilsTest, CheckAttrContextPeerCertificate) { + expectBasicHttp(); + + EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); + EXPECT_CALL(*ssl_, uriSanLocalCertificate()) + .WillOnce(Return(std::vector{"destination"})); + EXPECT_CALL(*ssl_, urlEncodedPemEncodedPeerCertificate()).WillOnce(ReturnRef(cert_data_)); + + callHttpCheckAndValidateRequestAttributes(true); } } // namespace From 28d49e3fa334bd04a6d748bb89e8e13b4f415c4d Mon Sep 17 00:00:00 2001 From: Fred Douglas <43351173+fredlas@users.noreply.github.com> Date: Mon, 11 Nov 2019 14:49:19 -0500 Subject: [PATCH 11/14] config: regression test for wrong xDS gRPC method descriptor (#8945) Signed-off-by: Fred Douglas --- test/integration/cds_integration_test.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index d3013870ea94..4c8f7184f53d 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -106,6 +106,17 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht registerTestServerPorts({"http"}); } + // Regression test to catch the code declaring a gRPC service method for {SotW,delta} + // when the user's bootstrap config asks for the other type. + void verifyGrpcServiceMethod() { + EXPECT_TRUE(xds_stream_->waitForHeadersComplete()); + Envoy::Http::LowerCaseString path_string(":path"); + std::string expected_method(sotwOrDelta() == Grpc::SotwOrDelta::Sotw + ? "/envoy.api.v2.ClusterDiscoveryService/StreamClusters" + : "/envoy.api.v2.ClusterDiscoveryService/DeltaClusters"); + EXPECT_EQ(xds_stream_->headers().get(path_string)->value(), expected_method); + } + void acceptXdsConnection() { AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection. 
fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_); @@ -113,6 +124,7 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); RELEASE_ASSERT(result, result.message()); xds_stream_->startGrpcStream(); + verifyGrpcServiceMethod(); fake_upstreams_[0]->set_allow_unexpected_disconnects(true); } From a36c4684f235fdcabfd12fafae697e7d7f777b57 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Tue, 12 Nov 2019 05:47:49 +0900 Subject: [PATCH 12/14] grpc: Allow user defined grpc status (#8886) Description: This fix is a branch of issue #8053. User defined grpc-status is allow on grpc specification so that it is truly handled on envoy grpc-json transcoding Risk Level: Low Testing: Unit Test Docs Changes: none Release Notes: none Signed-off-by: Shikugawa --- include/envoy/grpc/status.h | 6 +- source/common/access_log/access_log_impl.cc | 4 +- .../common/config/delta_subscription_state.cc | 4 +- source/common/config/grpc_mux_impl.cc | 2 +- source/common/grpc/async_client_impl.cc | 22 +++---- source/common/grpc/common.cc | 11 ++-- source/common/grpc/common.h | 5 +- .../common/grpc/google_async_client_impl.cc | 16 ++--- source/common/grpc/google_async_client_impl.h | 2 +- source/common/grpc/status.cc | 48 +++++++-------- source/common/grpc/typed_async_client.h | 2 +- source/common/router/router.cc | 1 - source/common/tracing/http_tracer_impl.cc | 2 +- source/common/upstream/health_checker_impl.cc | 23 +++---- .../common/ext_authz/ext_authz_grpc_impl.cc | 4 +- .../common/ratelimit/ratelimit_impl.cc | 2 +- .../http/grpc_http1_reverse_bridge/filter.cc | 6 +- .../json_transcoder_filter.cc | 8 +-- .../filters/http/ratelimit/ratelimit.h | 2 +- .../common/access_log/access_log_impl_test.cc | 4 +- .../config/delta_subscription_impl_test.cc | 12 ++-- .../config/delta_subscription_state_test.cc | 8 +-- .../config/delta_subscription_test_harness.h | 8 +-- test/common/config/grpc_mux_impl_test.cc | 8 +-- test/common/config/grpc_stream_test.cc | 2 +- .../config/grpc_subscription_impl_test.cc | 4 +- .../config/grpc_subscription_test_harness.h | 9 +-- test/common/grpc/async_client_impl_test.cc | 10 ++-- test/common/grpc/common_test.cc | 60 +++++++++++-------- .../grpc/google_async_client_impl_test.cc | 4 +- .../grpc/grpc_client_integration_test.cc | 52 ++++++++-------- .../grpc_client_integration_test_harness.h | 7 ++- test/common/http/utility_test.cc | 16 ++--- .../upstream/load_stats_reporter_test.cc | 2 +- .../ext_authz/ext_authz_grpc_impl_test.cc | 17 +++--- .../filters/common/ext_authz/test_common.cc | 2 +- .../filters/common/ext_authz/test_common.h | 10 ++-- test/integration/ads_integration_test.cc | 10 ++-- test/integration/integration.cc | 2 +- test/integration/integration.h | 21 ++++--- 40 files changed, 231 insertions(+), 207 deletions(-) diff --git a/include/envoy/grpc/status.h b/include/envoy/grpc/status.h index dbc7c0a016f1..027ecd19f5db 100644 --- a/include/envoy/grpc/status.h +++ b/include/envoy/grpc/status.h @@ -5,9 +5,11 @@ namespace Grpc { class Status { public: + using GrpcStatus = int64_t; + // If this enum is changed, then the std::unordered_map in Envoy::Grpc::Utility::nameToGrpcStatus // located at: //source/common/access_log/grpc/status.cc must also be changed. - enum GrpcStatus { + enum WellKnownGrpcStatus { // The RPC completed successfully. Ok = 0, // The RPC was canceled. @@ -44,7 +46,7 @@ class Status { Unauthenticated = 16, // Maximum value of valid status codes. 
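To make the type change concrete: Status::GrpcStatus is now a plain int64_t, WellKnownGrpcStatus enumerates the canonical codes (Ok through Unauthenticated, i.e. up to MaximumKnown) plus the InvalidCode marker, and a trailer value outside the well-known range is reported as InvalidCode unless the caller opts in via the new allow_user_defined argument (as the gRPC-JSON transcoder now does when encoding trailers). A minimal sketch, mirroring the new common_test.cc cases further down; the include paths and the plain assert() are illustrative only.

#include <cassert>

#include "common/grpc/common.h"
#include "test/test_common/utility.h"

void userDefinedGrpcStatusExample() {
  // A trailer carrying a status code outside the well-known 0..16 range.
  Envoy::Http::TestHeaderMapImpl trailers{{"grpc-status", "1024"}};

  // Default (strict) behaviour: unknown codes collapse to InvalidCode.
  assert(Envoy::Grpc::Common::getGrpcStatus(trailers).value() ==
         Envoy::Grpc::Status::WellKnownGrpcStatus::InvalidCode);

  // Opt-in behaviour: the user-defined code is passed through unchanged.
  assert(Envoy::Grpc::Common::getGrpcStatus(trailers, /*allow_user_defined=*/true).value() ==
         1024);
}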
- MaximumValid = Unauthenticated, + MaximumKnown = Unauthenticated, // This is a non-GRPC error code, indicating the status code in gRPC headers // was invalid. diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index 837842f621ae..7613e3ba9ae0 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -240,7 +240,7 @@ bool GrpcStatusFilter::evaluate(const StreamInfo::StreamInfo& info, const Http:: // 2. response_headers gRPC status, if it exists. // 3. Inferred from info HTTP status, if it exists. // - // If none of those options exist, it will default to Grpc::Status::GrpcStatus::Unknown. + // If none of those options exist, it will default to Grpc::Status::WellKnownGrpcStatus::Unknown. const std::array, 3> optional_statuses = {{ {Grpc::Common::getGrpcStatus(response_trailers)}, {Grpc::Common::getGrpcStatus(response_headers)}, @@ -249,7 +249,7 @@ bool GrpcStatusFilter::evaluate(const StreamInfo::StreamInfo& info, const Http:: : absl::nullopt}, }}; - Grpc::Status::GrpcStatus status = Grpc::Status::GrpcStatus::Unknown; + Grpc::Status::GrpcStatus status = Grpc::Status::WellKnownGrpcStatus::Unknown; for (const auto& optional_status : optional_statuses) { if (optional_status.has_value()) { status = optional_status.value(); diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index 93ddc18164da..8fdfdca0ffca 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -110,7 +110,7 @@ void DeltaSubscriptionState::handleGoodResponse( void DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAck& ack) { // Note that error_detail being set is what indicates that a DeltaDiscoveryRequest is a NACK. - ack.error_detail_.set_code(Grpc::Status::GrpcStatus::Internal); + ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal); ack.error_detail_.set_message(e.what()); disableInitFetchTimeoutTimer(); ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); @@ -158,7 +158,7 @@ envoy::api::v2::DeltaDiscoveryRequest DeltaSubscriptionState::getNextRequestWithAck(const UpdateAck& ack) { envoy::api::v2::DeltaDiscoveryRequest request = getNextRequestAckless(); request.set_response_nonce(ack.nonce_); - if (ack.error_detail_.code() != Grpc::Status::GrpcStatus::Ok) { + if (ack.error_detail_.code() != Grpc::Status::WellKnownGrpcStatus::Ok) { // Don't needlessly make the field present-but-empty if status is ok. 
request.mutable_error_detail()->CopyFrom(ack.error_detail_); } diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index fdb3fa603758..3246dfbbd189 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -201,7 +201,7 @@ void GrpcMuxImpl::onDiscoveryResponse( Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); } ::google::rpc::Status* error_detail = api_state_[type_url].request_.mutable_error_detail(); - error_detail->set_code(Grpc::Status::GrpcStatus::Internal); + error_detail->set_code(Grpc::Status::WellKnownGrpcStatus::Internal); error_detail->set_message(e.what()); } api_state_[type_url].request_.set_response_nonce(message->nonce()); diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index 5c42c0e91d34..c4f99bc98852 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -64,7 +64,7 @@ AsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, absl::string_view serv void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { if (parent_.cm_.get(parent_.remote_cluster_name_) == nullptr) { - callbacks_.onRemoteClose(Status::GrpcStatus::Unavailable, "Cluster not available"); + callbacks_.onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "Cluster not available"); http_reset_ = true; return; } @@ -74,7 +74,7 @@ void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { stream_ = http_async_client.start(*this, options_.setBufferBodyForRetry(buffer_body_for_retry)); if (stream_ == nullptr) { - callbacks_.onRemoteClose(Status::GrpcStatus::Unavailable, EMPTY_STRING); + callbacks_.onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, EMPTY_STRING); http_reset_ = true; return; } @@ -112,8 +112,8 @@ void AsyncStreamImpl::onHeaders(Http::HeaderMapPtr&& headers, bool end_stream) { // Technically this should be // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md // as given by Grpc::Utility::httpToGrpcStatus(), but the Google gRPC client treats - // this as GrpcStatus::Canceled. - streamError(Status::GrpcStatus::Canceled); + // this as WellKnownGrpcStatus::Canceled. + streamError(Status::WellKnownGrpcStatus::Canceled); return; } if (end_stream) { @@ -124,24 +124,24 @@ void AsyncStreamImpl::onHeaders(Http::HeaderMapPtr&& headers, bool end_stream) { void AsyncStreamImpl::onData(Buffer::Instance& data, bool end_stream) { decoded_frames_.clear(); if (!decoder_.decode(data, decoded_frames_)) { - streamError(Status::GrpcStatus::Internal); + streamError(Status::WellKnownGrpcStatus::Internal); return; } for (auto& frame : decoded_frames_) { if (frame.length_ > 0 && frame.flags_ != GRPC_FH_DEFAULT) { - streamError(Status::GrpcStatus::Internal); + streamError(Status::WellKnownGrpcStatus::Internal); return; } if (!callbacks_.onReceiveMessageRaw(frame.data_ ? 
std::move(frame.data_) : std::make_unique())) { - streamError(Status::GrpcStatus::Internal); + streamError(Status::WellKnownGrpcStatus::Internal); return; } } if (end_stream) { - streamError(Status::GrpcStatus::Unknown); + streamError(Status::WellKnownGrpcStatus::Unknown); } } @@ -152,7 +152,7 @@ void AsyncStreamImpl::onTrailers(Http::HeaderMapPtr&& trailers) { const std::string grpc_message = Common::getGrpcMessage(*trailers); callbacks_.onReceiveTrailingMetadata(std::move(trailers)); if (!grpc_status) { - grpc_status = Status::GrpcStatus::Unknown; + grpc_status = Status::WellKnownGrpcStatus::Unknown; } callbacks_.onRemoteClose(grpc_status.value(), grpc_message); cleanup(); @@ -174,7 +174,7 @@ void AsyncStreamImpl::onReset() { } http_reset_ = true; - streamError(Status::GrpcStatus::Internal); + streamError(Status::WellKnownGrpcStatus::Internal); } void AsyncStreamImpl::sendMessage(const Protobuf::Message& request, bool end_stream) { @@ -252,7 +252,7 @@ void AsyncRequestImpl::onReceiveTrailingMetadata(Http::HeaderMapPtr&&) {} void AsyncRequestImpl::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { current_span_->setTag(Tracing::Tags::get().GrpcStatusCode, std::to_string(status)); - if (status != Grpc::Status::GrpcStatus::Ok) { + if (status != Grpc::Status::WellKnownGrpcStatus::Ok) { current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); callbacks_.onFailure(status, message, *current_span_); } else if (response_ == nullptr) { diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 54c4b3b9639c..72a3894e7700 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -48,16 +48,17 @@ bool Common::isGrpcResponseHeader(const Http::HeaderMap& headers, bool end_strea return hasGrpcContentType(headers); } -absl::optional Common::getGrpcStatus(const Http::HeaderMap& trailers) { +absl::optional Common::getGrpcStatus(const Http::HeaderMap& trailers, + bool allow_user_defined) { const Http::HeaderEntry* grpc_status_header = trailers.GrpcStatus(); - uint64_t grpc_status_code; + if (!grpc_status_header || grpc_status_header->value().empty()) { return absl::nullopt; } if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) || - grpc_status_code > Status::GrpcStatus::MaximumValid) { - return {Status::GrpcStatus::InvalidCode}; + (grpc_status_code > Status::WellKnownGrpcStatus::MaximumKnown && !allow_user_defined)) { + return {Status::WellKnownGrpcStatus::InvalidCode}; } return {static_cast(grpc_status_code)}; } @@ -222,7 +223,7 @@ void Common::checkForHeaderOnlyError(Http::Message& http_response) { return; } - if (grpc_status_code.value() == Status::GrpcStatus::InvalidCode) { + if (grpc_status_code.value() == Status::WellKnownGrpcStatus::InvalidCode) { throw Exception(absl::optional(), "bad grpc-status header"); } diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index e5939eaee45f..21bffc5c74ff 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -45,10 +45,13 @@ class Common { /** * Returns the GrpcStatus code from a given set of trailers, if present. * @param trailers the trailers to parse. + * @param allow_user_status whether allow user defined grpc status. + * if this value is false, custom grpc status is regarded as invalid status * @return absl::optional the parsed status code or InvalidCode if no valid * status is found. 
*/ - static absl::optional getGrpcStatus(const Http::HeaderMap& trailers); + static absl::optional getGrpcStatus(const Http::HeaderMap& trailers, + bool allow_user_defined = false); /** * Returns the grpc-message from a given set of trailers, if present. diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index abc17b32f44c..17007ba260c2 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -84,7 +84,7 @@ GoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, stub_ = stub_factory.createStub(channel); // Initialize client stats. stats_.streams_total_ = &scope_->counter("streams_total"); - for (uint32_t i = 0; i <= Status::GrpcStatus::MaximumValid; ++i) { + for (uint32_t i = 0; i <= Status::WellKnownGrpcStatus::MaximumKnown; ++i) { stats_.streams_closed_[i] = &scope_->counter(fmt::format("streams_closed_{}", i)); } } @@ -177,7 +177,7 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { rw_ = parent_.stub_->PrepareCall(&ctxt_, "/" + service_full_name_ + "/" + method_name_, &parent_.tls_.completionQueue()); if (rw_ == nullptr) { - notifyRemoteClose(Status::GrpcStatus::Unavailable, nullptr, EMPTY_STRING); + notifyRemoteClose(Status::WellKnownGrpcStatus::Unavailable, nullptr, EMPTY_STRING); call_failed_ = true; return; } @@ -189,12 +189,12 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { void GoogleAsyncStreamImpl::notifyRemoteClose(Status::GrpcStatus grpc_status, Http::HeaderMapPtr trailing_metadata, const std::string& message) { - if (grpc_status > Status::GrpcStatus::MaximumValid || grpc_status < 0) { + if (grpc_status > Status::WellKnownGrpcStatus::MaximumKnown || grpc_status < 0) { ENVOY_LOG(error, "notifyRemoteClose invalid gRPC status code {}", grpc_status); // Set the grpc_status as InvalidCode but increment the Unknown stream to avoid out-of-range // crash.. - grpc_status = Status::GrpcStatus::InvalidCode; - parent_.stats_.streams_closed_[Status::GrpcStatus::Unknown]->inc(); + grpc_status = Status::WellKnownGrpcStatus::InvalidCode; + parent_.stats_.streams_closed_[Status::WellKnownGrpcStatus::Unknown]->inc(); } else { parent_.stats_.streams_closed_[grpc_status]->inc(); } @@ -272,7 +272,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo // Early fails can be just treated as Internal. if (op == GoogleAsyncTag::Operation::Init || op == GoogleAsyncTag::Operation::ReadInitialMetadata) { - notifyRemoteClose(Status::GrpcStatus::Internal, nullptr, EMPTY_STRING); + notifyRemoteClose(Status::WellKnownGrpcStatus::Internal, nullptr, EMPTY_STRING); resetStream(); return; } @@ -324,7 +324,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo auto buffer = GoogleGrpcUtils::makeBufferInstance(read_buf_); if (!buffer || !callbacks_.onReceiveMessageRaw(std::move(buffer))) { // This is basically streamError in Grpc::AsyncClientImpl. 
- notifyRemoteClose(Status::GrpcStatus::Internal, nullptr, EMPTY_STRING); + notifyRemoteClose(Status::WellKnownGrpcStatus::Internal, nullptr, EMPTY_STRING); resetStream(); break; } @@ -438,7 +438,7 @@ void GoogleAsyncRequestImpl::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { current_span_->setTag(Tracing::Tags::get().GrpcStatusCode, std::to_string(status)); - if (status != Grpc::Status::GrpcStatus::Ok) { + if (status != Grpc::Status::WellKnownGrpcStatus::Ok) { current_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); callbacks_.onFailure(status, message, *current_span_); } else if (response_ == nullptr) { diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index edc633a08d4d..1026780cdba6 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -118,7 +118,7 @@ struct GoogleAsyncClientStats { // .streams_total Stats::Counter* streams_total_; // .streams_closed_ - std::array streams_closed_; + std::array streams_closed_; }; // Interface to allow the gRPC stub to be mocked out by tests. diff --git a/source/common/grpc/status.cc b/source/common/grpc/status.cc index 70fdcce02711..be44ee8b9f8f 100644 --- a/source/common/grpc/status.cc +++ b/source/common/grpc/status.cc @@ -8,77 +8,77 @@ Status::GrpcStatus Utility::httpToGrpcStatus(uint64_t http_response_status) { // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md. switch (http_response_status) { case 400: - return Status::GrpcStatus::Internal; + return Status::WellKnownGrpcStatus::Internal; case 401: - return Status::GrpcStatus::Unauthenticated; + return Status::WellKnownGrpcStatus::Unauthenticated; case 403: - return Status::GrpcStatus::PermissionDenied; + return Status::WellKnownGrpcStatus::PermissionDenied; case 404: - return Status::GrpcStatus::Unimplemented; + return Status::WellKnownGrpcStatus::Unimplemented; case 429: case 502: case 503: case 504: - return Status::GrpcStatus::Unavailable; + return Status::WellKnownGrpcStatus::Unavailable; default: - return Status::GrpcStatus::Unknown; + return Status::WellKnownGrpcStatus::Unknown; } } uint64_t Utility::grpcToHttpStatus(Status::GrpcStatus grpc_status) { // From https://cloud.google.com/apis/design/errors#handling_errors. switch (grpc_status) { - case Status::GrpcStatus::Ok: + case Status::WellKnownGrpcStatus::Ok: return 200; - case Status::GrpcStatus::Canceled: + case Status::WellKnownGrpcStatus::Canceled: // Client closed request. return 499; - case Status::GrpcStatus::Unknown: + case Status::WellKnownGrpcStatus::Unknown: // Internal server error. return 500; - case Status::GrpcStatus::InvalidArgument: + case Status::WellKnownGrpcStatus::InvalidArgument: // Bad request. return 400; - case Status::GrpcStatus::DeadlineExceeded: + case Status::WellKnownGrpcStatus::DeadlineExceeded: // Gateway Time-out. return 504; - case Status::GrpcStatus::NotFound: + case Status::WellKnownGrpcStatus::NotFound: // Not found. return 404; - case Status::GrpcStatus::AlreadyExists: + case Status::WellKnownGrpcStatus::AlreadyExists: // Conflict. return 409; - case Status::GrpcStatus::PermissionDenied: + case Status::WellKnownGrpcStatus::PermissionDenied: // Forbidden. return 403; - case Status::GrpcStatus::ResourceExhausted: + case Status::WellKnownGrpcStatus::ResourceExhausted: // Too many requests. 
return 429; - case Status::GrpcStatus::FailedPrecondition: + case Status::WellKnownGrpcStatus::FailedPrecondition: // Bad request. return 400; - case Status::GrpcStatus::Aborted: + case Status::WellKnownGrpcStatus::Aborted: // Conflict. return 409; - case Status::GrpcStatus::OutOfRange: + case Status::WellKnownGrpcStatus::OutOfRange: // Bad request. return 400; - case Status::GrpcStatus::Unimplemented: + case Status::WellKnownGrpcStatus::Unimplemented: // Not implemented. return 501; - case Status::GrpcStatus::Internal: + case Status::WellKnownGrpcStatus::Internal: // Internal server error. return 500; - case Status::GrpcStatus::Unavailable: + case Status::WellKnownGrpcStatus::Unavailable: // Service unavailable. return 503; - case Status::GrpcStatus::DataLoss: + case Status::WellKnownGrpcStatus::DataLoss: // Internal server error. return 500; - case Status::GrpcStatus::Unauthenticated: + case Status::WellKnownGrpcStatus::Unauthenticated: // Unauthorized. return 401; - case Status::GrpcStatus::InvalidCode: + case Status::WellKnownGrpcStatus::InvalidCode: default: // Internal server error. return 500; diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index d1a95d41960e..72907e42e611 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -64,7 +64,7 @@ template class AsyncRequestCallbacks : public RawAsyncReques Internal::parseMessageUntyped(std::make_unique(), std::move(response)) .release())); if (!message) { - onFailure(Status::GrpcStatus::Internal, "", span); + onFailure(Status::WellKnownGrpcStatus::Internal, "", span); return; } onSuccess(std::move(message), span); diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 63f36fc0bbcf..87c1b686987e 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -565,7 +565,6 @@ Http::ConnectionPool::Instance* Filter::getConnPool() { // Choose protocol based on cluster configuration and downstream connection // Note: Cluster may downgrade HTTP2 to HTTP1 based on runtime configuration. Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( callbacks_->streamInfo().filterState()); diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index a1e3ea860e74..8d23ecf6459e 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -94,7 +94,7 @@ static void addGrpcTags(Span& span, const Http::HeaderMap& headers) { } absl::optional grpc_status_code = Grpc::Common::getGrpcStatus(headers); // Set error tag when status is not OK. 
- if (grpc_status_code && grpc_status_code.value() != Grpc::Status::GrpcStatus::Ok) { + if (grpc_status_code && grpc_status_code.value() != Grpc::Status::WellKnownGrpcStatus::Ok) { span.setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); } } diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 4b64c362b5a6..05179cfeed79 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -545,7 +545,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeHeaders( return; } if (!Grpc::Common::hasGrpcContentType(*headers)) { - onRpcComplete(Grpc::Status::GrpcStatus::Internal, "invalid gRPC content-type", false); + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "invalid gRPC content-type", false); return; } if (end_stream) { @@ -556,7 +556,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeHeaders( onRpcComplete(grpc_status.value(), Grpc::Common::getGrpcMessage(*headers), true); return; } - onRpcComplete(Grpc::Status::GrpcStatus::Internal, + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "gRPC protocol violation: unexpected stream end", true); } } @@ -564,7 +564,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeHeaders( void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeData(Buffer::Instance& data, bool end_stream) { if (end_stream) { - onRpcComplete(Grpc::Status::GrpcStatus::Internal, + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "gRPC protocol violation: unexpected stream end", true); return; } @@ -572,13 +572,14 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeData(Buffer::Ins // We should end up with only one frame here. std::vector decoded_frames; if (!decoder_.decode(data, decoded_frames)) { - onRpcComplete(Grpc::Status::GrpcStatus::Internal, "gRPC wire protocol decode error", false); + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "gRPC wire protocol decode error", + false); } for (auto& frame : decoded_frames) { if (frame.length_ > 0) { if (health_check_response_) { // grpc.health.v1.Health.Check is unary RPC, so only one message is allowed. - onRpcComplete(Grpc::Status::GrpcStatus::Internal, "unexpected streaming", false); + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "unexpected streaming", false); return; } health_check_response_ = std::make_unique(); @@ -586,8 +587,8 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeData(Buffer::Ins if (frame.flags_ != Grpc::GRPC_FH_DEFAULT || !health_check_response_->ParseFromZeroCopyStream(&stream)) { - onRpcComplete(Grpc::Status::GrpcStatus::Internal, "invalid grpc.health.v1 RPC payload", - false); + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, + "invalid grpc.health.v1 RPC payload", false); return; } } @@ -598,7 +599,9 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeTrailers( Http::HeaderMapPtr&& trailers) { auto maybe_grpc_status = Grpc::Common::getGrpcStatus(*trailers); auto grpc_status = - maybe_grpc_status ? maybe_grpc_status.value() : Grpc::Status::GrpcStatus::Internal; + maybe_grpc_status + ? maybe_grpc_status.value() + : static_cast(Grpc::Status::WellKnownGrpcStatus::Internal); const std::string grpc_message = maybe_grpc_status ? 
Grpc::Common::getGrpcMessage(*trailers) : "invalid gRPC status"; onRpcComplete(grpc_status, grpc_message, true); @@ -684,7 +687,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway() { bool GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::isHealthCheckSucceeded( Grpc::Status::GrpcStatus grpc_status) const { - if (grpc_status != Grpc::Status::GrpcStatus::Ok) { + if (grpc_status != Grpc::Status::WellKnownGrpcStatus::Ok) { return false; } @@ -757,7 +760,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::logHealthCheckStatus( } } std::string grpc_status_message; - if (grpc_status != Grpc::Status::GrpcStatus::Ok && !grpc_message.empty()) { + if (grpc_status != Grpc::Status::WellKnownGrpcStatus::Ok && !grpc_message.empty()) { grpc_status_message = fmt::format("{} ({})", grpc_status, grpc_message); } else { grpc_status_message = fmt::format("{}", grpc_status); diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index fb09b6d08c4a..749d75313c43 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -45,7 +45,7 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, void GrpcClientImpl::onSuccess(std::unique_ptr&& response, Tracing::Span& span) { ResponsePtr authz_response = std::make_unique(Response{}); - if (response->status().code() == Grpc::Status::GrpcStatus::Ok) { + if (response->status().code() == Grpc::Status::WellKnownGrpcStatus::Ok) { span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); authz_response->status = CheckStatus::OK; if (response->has_ok_response()) { @@ -70,7 +70,7 @@ void GrpcClientImpl::onSuccess(std::unique_ptrcomplete(LimitStatus::Error, nullptr, nullptr); callbacks_ = nullptr; } diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index 4c5e9182cbd4..318b7331c2ac 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -33,7 +33,7 @@ Grpc::Status::GrpcStatus grpcStatusFromHeaders(Http::HeaderMap& headers) { // from the standard but is key in being able to transform a successful // upstream HTTP response into a gRPC response. if (http_response_status == 200) { - return Grpc::Status::GrpcStatus::Ok; + return Grpc::Status::WellKnownGrpcStatus::Ok; } else { return Grpc::Utility::httpToGrpcStatus(http_response_status); } @@ -111,7 +111,7 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& buffer, bool) { // Fail the request if the body is too small to possibly contain a gRPC frame. 
if (buffer.length() < Grpc::GRPC_FRAME_HEADER_SIZE) { decoder_callbacks_->sendLocalReply(Http::Code::OK, "invalid request body", nullptr, - Grpc::Status::GrpcStatus::Unknown, + Grpc::Status::WellKnownGrpcStatus::Unknown, RcDetails::get().GrpcBridgeFailedTooSmall); return Http::FilterDataStatus::StopIterationNoBuffer; } @@ -133,7 +133,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::HeaderMap& headers, bool) if (content_type == nullptr || content_type->value().getStringView() != upstream_content_type_) { headers.insertGrpcMessage().value(badContentTypeMessage(headers)); - headers.insertGrpcStatus().value(Envoy::Grpc::Status::GrpcStatus::Unknown); + headers.insertGrpcStatus().value(Envoy::Grpc::Status::WellKnownGrpcStatus::Unknown); headers.insertStatus().value(enumToInt(Http::Code::OK)); if (content_type != nullptr) { diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 00476e1b6b6b..6b52e7701802 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -465,7 +465,7 @@ Http::FilterTrailersStatus JsonTranscoderFilter::encodeTrailers(Http::HeaderMap& response_in_.finish(); const absl::optional grpc_status = - Grpc::Common::getGrpcStatus(trailers); + Grpc::Common::getGrpcStatus(trailers, true); if (grpc_status && maybeConvertGrpcStatus(*grpc_status, trailers)) { return Http::FilterTrailersStatus::Continue; } @@ -486,7 +486,7 @@ Http::FilterTrailersStatus JsonTranscoderFilter::encodeTrailers(Http::HeaderMap& // so there is no need to copy headers from one to the other. bool is_trailers_only_response = response_headers_ == &trailers; - if (!grpc_status || grpc_status.value() == Grpc::Status::GrpcStatus::InvalidCode) { + if (!grpc_status || grpc_status.value() == Grpc::Status::WellKnownGrpcStatus::InvalidCode) { response_headers_->Status()->value(enumToInt(Http::Code::ServiceUnavailable)); } else { response_headers_->Status()->value(Grpc::Utility::grpcToHttpStatus(grpc_status.value())); @@ -568,8 +568,8 @@ bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_ return false; } - if (grpc_status == Grpc::Status::GrpcStatus::Ok || - grpc_status == Grpc::Status::GrpcStatus::InvalidCode) { + if (grpc_status == Grpc::Status::WellKnownGrpcStatus::Ok || + grpc_status == Grpc::Status::WellKnownGrpcStatus::InvalidCode) { return false; } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 19afcd2e4694..cc5e05e60612 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -45,7 +45,7 @@ class FilterConfig { failure_mode_deny_(config.failure_mode_deny()), rate_limited_grpc_status_( config.rate_limited_as_resource_exhausted() - ? absl::make_optional(Grpc::Status::GrpcStatus::ResourceExhausted) + ? 
absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted) : absl::nullopt), http_context_(http_context), stat_names_(scope.symbolTable()) {} const std::string& domain() const { return domain_; } diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 7ebc5caf1416..14fd8129b83c 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -1050,7 +1050,7 @@ name: envoy.file_access_log )EOF"; const auto desc = envoy::config::filter::accesslog::v2::GrpcStatusFilter_Status_descriptor(); - const int grpcStatuses = static_cast(Grpc::Status::GrpcStatus::MaximumValid) + 1; + const int grpcStatuses = static_cast(Grpc::Status::WellKnownGrpcStatus::MaximumKnown) + 1; if (desc->value_count() != grpcStatuses) { FAIL() << "Mismatch in number of gRPC statuses, GrpcStatus has " << grpcStatuses << ", GrpcStatusFilter_Status has " << desc->value_count() << "."; @@ -1172,7 +1172,7 @@ name: envoy.file_access_log const InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); - for (int i = 0; i <= static_cast(Grpc::Status::GrpcStatus::MaximumValid); i++) { + for (int i = 0; i <= static_cast(Grpc::Status::WellKnownGrpcStatus::MaximumKnown); i++) { EXPECT_CALL(*file_, write(_)).Times(i == 0 ? 0 : 1); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, std::to_string(i)); diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 366d2eec6f58..e6b783b6583f 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -18,15 +18,15 @@ class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public te TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { startSubscription({"name1", "name2", "name3"}); - expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest({"name3", "name4"}); - expectSendMessage({"name1", "name2"}, {}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage({"name1", "name2"}, {}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest({"name1", "name2", "name3", "name4"}); - expectSendMessage({}, {"name1", "name2"}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage({}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest({"name3", "name4"}); - expectSendMessage({"name1", "name2"}, {}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage({"name1", "name2"}, {}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest({"name1", "name2", "name3", "name4"}); - expectSendMessage({}, {"name1", "name2", "name3"}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage({}, {"name1", "name2", "name3"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest({"name4"}); } @@ -38,7 +38,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { startSubscription({"name1", "name2", "name3"}); subscription_->pause(); - expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); // If not for the pause, these updates would make the 
expectSendMessage fail due to too many // messages being sent. subscription_->updateResourceInterest({"name3", "name4"}); diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 2a96f2b0953f..f98dfe5aa05e 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -174,7 +174,7 @@ TEST_F(DeltaSubscriptionStateTest, AckGenerated) { populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); EXPECT_EQ("nonce1", ack.nonce_); - EXPECT_EQ(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } // The next response updates 1 and 2, and adds 3. { @@ -182,7 +182,7 @@ TEST_F(DeltaSubscriptionStateTest, AckGenerated) { {{"name1", "version1B"}, {"name2", "version2B"}, {"name3", "version3A"}}); UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug2", "nonce2"); EXPECT_EQ("nonce2", ack.nonce_); - EXPECT_EQ(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } // The next response tries but fails to update all 3, and so should produce a NACK. { @@ -190,7 +190,7 @@ TEST_F(DeltaSubscriptionStateTest, AckGenerated) { {{"name1", "version1C"}, {"name2", "version2C"}, {"name3", "version3B"}}); UpdateAck ack = deliverBadDiscoveryResponse(added_resources, {}, "debug3", "nonce3"); EXPECT_EQ("nonce3", ack.nonce_); - EXPECT_NE(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + EXPECT_NE(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } // The last response successfully updates all 3. 
{ @@ -198,7 +198,7 @@ TEST_F(DeltaSubscriptionStateTest, AckGenerated) { {{"name1", "version1D"}, {"name2", "version2D"}, {"name3", "version3C"}}); UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug4", "nonce4"); EXPECT_EQ("nonce4", ack.nonce_); - EXPECT_EQ(Grpc::Status::GrpcStatus::Ok, ack.error_detail_.code()); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } } diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index 720b771e65dd..bf35490d4b90 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -76,7 +76,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { bool expect_node = false) override { UNREFERENCED_PARAMETER(version); UNREFERENCED_PARAMETER(expect_node); - expectSendMessage(cluster_names, {}, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage(cluster_names, {}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); } void expectSendMessage(const std::set& subscribe, @@ -101,7 +101,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { (*expected_request.mutable_initial_resource_versions())[resource.first] = resource.second; } - if (error_code != Grpc::Status::GrpcStatus::Ok) { + if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) { ::google::rpc::Status* error_detail = expected_request.mutable_error_detail(); error_detail->set_code(error_code); error_detail->set_message(error_message); @@ -146,7 +146,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { } else { EXPECT_CALL(callbacks_, onConfigUpdateFailed( Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); - expectSendMessage({}, {}, Grpc::Status::GrpcStatus::Internal, "bad config", {}); + expectSendMessage({}, {}, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config", {}); } static_cast(subscription_->getContextForTest().get()) ->onDiscoveryResponse(std::move(response)); @@ -163,7 +163,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { cluster_names.begin(), cluster_names.end(), std::inserter(unsub, unsub.begin())); - expectSendMessage(sub, unsub, Grpc::Status::GrpcStatus::Ok, "", {}); + expectSendMessage(sub, unsub, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest(cluster_names); last_cluster_names_ = cluster_names; } diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 7ef57acb1a8c..c9fb873d6158 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -66,7 +66,7 @@ class GrpcMuxImplTestBase : public testing::Test { void expectSendMessage(const std::string& type_url, const std::vector& resource_names, const std::string& version, bool first = false, const std::string& nonce = "", - const Protobuf::int32 error_code = Grpc::Status::GrpcStatus::Ok, + const Protobuf::int32 error_code = Grpc::Status::WellKnownGrpcStatus::Ok, const std::string& error_message = "") { envoy::api::v2::DiscoveryRequest expected_request; if (first) { @@ -80,7 +80,7 @@ class GrpcMuxImplTestBase : public testing::Test { } expected_request.set_response_nonce(nonce); expected_request.set_type_url(type_url); - if (error_code != Grpc::Status::GrpcStatus::Ok) { + if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) { ::google::rpc::Status* error_detail = expected_request.mutable_error_detail(); 
error_detail->set_code(error_code); error_detail->set_message(error_message); @@ -163,7 +163,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { EXPECT_CALL(random_, random()); ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock. EXPECT_CALL(*timer, enableTimer(_, _)); - grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); EXPECT_EQ(0, control_plane_connected_state_.value()); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); @@ -227,7 +227,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { })); expectSendMessage( - "foo", {"x", "y"}, "", false, "", Grpc::Status::GrpcStatus::Internal, + "foo", {"x", "y"}, "", false, "", Grpc::Status::WellKnownGrpcStatus::Internal, fmt::format("bar does not match the message-wide type URL foo in DiscoveryResponse {}", invalid_response->DebugString())); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(invalid_response)); diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 0c37a2264259..bb575711ea20 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -59,7 +59,7 @@ TEST_F(GrpcStreamTest, EstablishStream) { grpc_stream_.establishNewStream(); EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); } - grpc_stream_.onRemoteClose(Grpc::Status::GrpcStatus::Ok, ""); + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, ""); EXPECT_FALSE(grpc_stream_.grpcStreamAvailable()); // Successful re-establishment { diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index 9664aed24e9e..f7bed5838555 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -46,8 +46,8 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { .Times(0); EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(random_, random()); - subscription_->grpcMux()->grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, - ""); + subscription_->grpcMux()->grpcStreamForTest().onRemoteClose( + Grpc::Status::WellKnownGrpcStatus::Canceled, ""); EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0)); verifyControlPlaneStats(0); diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index b5cb517c4b83..476230d68cb5 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -52,7 +52,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { void expectSendMessage(const std::set& cluster_names, const std::string& version, bool expect_node = false) override { - expectSendMessage(cluster_names, version, expect_node, Grpc::Status::GrpcStatus::Ok, ""); + expectSendMessage(cluster_names, version, expect_node, Grpc::Status::WellKnownGrpcStatus::Ok, + ""); } void expectSendMessage(const std::set& cluster_names, const std::string& version, @@ -71,7 +72,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { } expected_request.set_response_nonce(last_response_nonce_); expected_request.set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); - if (error_code != Grpc::Status::GrpcStatus::Ok) { + if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) { ::google::rpc::Status* error_detail = 
expected_request.mutable_error_detail(); error_detail->set_code(error_code); error_detail->set_message(error_message); @@ -111,8 +112,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { } else { EXPECT_CALL(callbacks_, onConfigUpdateFailed( Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); - expectSendMessage(last_cluster_names_, version_, false, Grpc::Status::GrpcStatus::Internal, - "bad config"); + expectSendMessage(last_cluster_names_, version_, false, + Grpc::Status::WellKnownGrpcStatus::Internal, "bad config"); } subscription_->grpcMux()->onDiscoveryResponse(std::move(response)); Mock::VerifyAndClearExpectations(&async_stream_); diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index 15d65f60ffba..72e5d6411584 100644 --- a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -41,7 +41,7 @@ class EnvoyAsyncClientImplTest : public testing::Test { TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { MockAsyncStreamCallbacks grpc_callbacks; ON_CALL(http_client_, start(_, _)).WillByDefault(Return(nullptr)); - EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::GrpcStatus::Unavailable, "")); + EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); EXPECT_TRUE(grpc_stream == nullptr); @@ -52,7 +52,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { TEST_F(EnvoyAsyncClientImplTest, RequestHttpStartFail) { MockAsyncRequestCallbacks grpc_callbacks; ON_CALL(http_client_, start(_, _)).WillByDefault(Return(nullptr)); - EXPECT_CALL(grpc_callbacks, onFailure(Status::GrpcStatus::Unavailable, "", _)); + EXPECT_CALL(grpc_callbacks, onFailure(Status::WellKnownGrpcStatus::Unavailable, "", _)); helloworld::HelloRequest request_msg; Tracing::MockSpan active_span; @@ -93,7 +93,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpSendHeadersFail) { http_callbacks->onReset(); })); EXPECT_CALL(grpc_callbacks, onReceiveTrailingMetadata_(_)); - EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::GrpcStatus::Internal, "")); + EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Internal, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); EXPECT_TRUE(grpc_stream == nullptr); @@ -119,7 +119,7 @@ TEST_F(EnvoyAsyncClientImplTest, RequestHttpSendHeadersFail) { UNREFERENCED_PARAMETER(end_stream); http_callbacks->onReset(); })); - EXPECT_CALL(grpc_callbacks, onFailure(Status::GrpcStatus::Internal, "", _)); + EXPECT_CALL(grpc_callbacks, onFailure(Status::WellKnownGrpcStatus::Internal, "", _)); helloworld::HelloRequest request_msg; Tracing::MockSpan active_span; @@ -145,7 +145,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpClientException) { MockAsyncStreamCallbacks grpc_callbacks; ON_CALL(cm_, get(_)).WillByDefault(Return(nullptr)); EXPECT_CALL(grpc_callbacks, - onRemoteClose(Status::GrpcStatus::Unavailable, "Cluster not available")); + onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "Cluster not available")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); EXPECT_TRUE(grpc_stream == nullptr); diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index ec3f6d7cff91..88761b402676 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ 
-30,6 +30,12 @@ TEST(GrpcContextTest, GetGrpcStatus) { Http::TestHeaderMapImpl invalid_trailers{{"grpc-status", "-1"}}; EXPECT_EQ(Status::InvalidCode, Common::getGrpcStatus(invalid_trailers).value()); + + Http::TestHeaderMapImpl user_defined_invalid_trailers{{"grpc-status", "1024"}}; + EXPECT_EQ(Status::InvalidCode, Common::getGrpcStatus(invalid_trailers).value()); + + Http::TestHeaderMapImpl user_defined_trailers{{"grpc-status", "1024"}}; + EXPECT_EQ(1024, Common::getGrpcStatus(user_defined_trailers, true).value()); } TEST(GrpcContextTest, GetGrpcMessage) { @@ -89,14 +95,14 @@ TEST(GrpcCommonTest, GrpcStatusDetailsBin) { {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA"}}; auto status = Common::getGrpcStatusDetailsBin(unpadded_value); ASSERT_TRUE(status); - EXPECT_EQ(Status::GrpcStatus::NotFound, status->code()); + EXPECT_EQ(Status::WellKnownGrpcStatus::NotFound, status->code()); EXPECT_EQ("Resource not found", status->message()); Http::TestHeaderMapImpl padded_value{ {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA=="}}; status = Common::getGrpcStatusDetailsBin(padded_value); ASSERT_TRUE(status); - EXPECT_EQ(Status::GrpcStatus::NotFound, status->code()); + EXPECT_EQ(Status::WellKnownGrpcStatus::NotFound, status->code()); EXPECT_EQ("Resource not found", status->message()); } @@ -200,24 +206,24 @@ TEST(GrpcContextTest, PrepareHeaders) { TEST(GrpcContextTest, GrpcToHttpStatus) { const std::vector> test_set = { - {Status::GrpcStatus::Ok, 200}, - {Status::GrpcStatus::Canceled, 499}, - {Status::GrpcStatus::Unknown, 500}, - {Status::GrpcStatus::InvalidArgument, 400}, - {Status::GrpcStatus::DeadlineExceeded, 504}, - {Status::GrpcStatus::NotFound, 404}, - {Status::GrpcStatus::AlreadyExists, 409}, - {Status::GrpcStatus::PermissionDenied, 403}, - {Status::GrpcStatus::ResourceExhausted, 429}, - {Status::GrpcStatus::FailedPrecondition, 400}, - {Status::GrpcStatus::Aborted, 409}, - {Status::GrpcStatus::OutOfRange, 400}, - {Status::GrpcStatus::Unimplemented, 501}, - {Status::GrpcStatus::Internal, 500}, - {Status::GrpcStatus::Unavailable, 503}, - {Status::GrpcStatus::DataLoss, 500}, - {Status::GrpcStatus::Unauthenticated, 401}, - {Status::GrpcStatus::InvalidCode, 500}, + {Status::WellKnownGrpcStatus::Ok, 200}, + {Status::WellKnownGrpcStatus::Canceled, 499}, + {Status::WellKnownGrpcStatus::Unknown, 500}, + {Status::WellKnownGrpcStatus::InvalidArgument, 400}, + {Status::WellKnownGrpcStatus::DeadlineExceeded, 504}, + {Status::WellKnownGrpcStatus::NotFound, 404}, + {Status::WellKnownGrpcStatus::AlreadyExists, 409}, + {Status::WellKnownGrpcStatus::PermissionDenied, 403}, + {Status::WellKnownGrpcStatus::ResourceExhausted, 429}, + {Status::WellKnownGrpcStatus::FailedPrecondition, 400}, + {Status::WellKnownGrpcStatus::Aborted, 409}, + {Status::WellKnownGrpcStatus::OutOfRange, 400}, + {Status::WellKnownGrpcStatus::Unimplemented, 501}, + {Status::WellKnownGrpcStatus::Internal, 500}, + {Status::WellKnownGrpcStatus::Unavailable, 503}, + {Status::WellKnownGrpcStatus::DataLoss, 500}, + {Status::WellKnownGrpcStatus::Unauthenticated, 401}, + {Status::WellKnownGrpcStatus::InvalidCode, 500}, }; for (const auto& test_case : test_set) { EXPECT_EQ(test_case.second, Grpc::Utility::grpcToHttpStatus(test_case.first)); @@ -226,11 +232,15 @@ TEST(GrpcContextTest, GrpcToHttpStatus) { TEST(GrpcContextTest, HttpToGrpcStatus) { const std::vector> test_set = { - {400, Status::GrpcStatus::Internal}, {401, Status::GrpcStatus::Unauthenticated}, - {403, Status::GrpcStatus::PermissionDenied}, {404, 
Status::GrpcStatus::Unimplemented}, - {429, Status::GrpcStatus::Unavailable}, {502, Status::GrpcStatus::Unavailable}, - {503, Status::GrpcStatus::Unavailable}, {504, Status::GrpcStatus::Unavailable}, - {500, Status::GrpcStatus::Unknown}, + {400, Status::WellKnownGrpcStatus::Internal}, + {401, Status::WellKnownGrpcStatus::Unauthenticated}, + {403, Status::WellKnownGrpcStatus::PermissionDenied}, + {404, Status::WellKnownGrpcStatus::Unimplemented}, + {429, Status::WellKnownGrpcStatus::Unavailable}, + {502, Status::WellKnownGrpcStatus::Unavailable}, + {503, Status::WellKnownGrpcStatus::Unavailable}, + {504, Status::WellKnownGrpcStatus::Unavailable}, + {500, Status::WellKnownGrpcStatus::Unknown}, }; for (const auto& test_case : test_set) { EXPECT_EQ(test_case.second, Grpc::Utility::httpToGrpcStatus(test_case.first)); diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index ffc9c395fc84..40a7d02da2da 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -78,7 +78,7 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { MockAsyncStreamCallbacks grpc_callbacks; EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_)); EXPECT_CALL(grpc_callbacks, onReceiveTrailingMetadata_(_)); - EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::GrpcStatus::Unavailable, "")); + EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); EXPECT_TRUE(grpc_stream == nullptr); @@ -90,7 +90,7 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr)); MockAsyncRequestCallbacks grpc_callbacks; EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_)); - EXPECT_CALL(grpc_callbacks, onFailure(Status::GrpcStatus::Unavailable, "", _)); + EXPECT_CALL(grpc_callbacks, onFailure(Status::WellKnownGrpcStatus::Unavailable, "", _)); helloworld::HelloRequest request_msg; Tracing::MockSpan active_span; diff --git a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index f0dbc9690f17..d2c93021882f 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -25,7 +25,7 @@ TEST_P(GrpcClientIntegrationTest, BasicStream) { stream->sendRequest(); stream->sendServerInitialMetadata(empty_metadata_); stream->sendReply(); - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_); dispatcher_helper_.runDispatcher(); } @@ -54,8 +54,8 @@ TEST_P(GrpcClientIntegrationTest, MultiStream) { stream_1->sendRequest(); stream_0->sendServerInitialMetadata(empty_metadata_); stream_0->sendReply(); - stream_1->sendServerTrailers(Status::GrpcStatus::Unavailable, "", empty_metadata_, true); - stream_0->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_); + stream_1->sendServerTrailers(Status::WellKnownGrpcStatus::Unavailable, "", empty_metadata_, true); + stream_0->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_); dispatcher_helper_.runDispatcher(); } @@ -80,8 +80,8 @@ TEST_P(GrpcClientIntegrationTest, HttpNon200Status) { // Technically this should be // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md // as given by 
Grpc::Utility::httpToGrpcStatus(), but the Google gRPC client treats - // this as GrpcStatus::Canceled. - stream->expectGrpcStatus(Status::GrpcStatus::Canceled); + // this as WellKnownGrpcStatus::Canceled. + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Canceled); stream->fake_stream_->encodeHeaders(reply_headers, true); dispatcher_helper_.runDispatcher(); } @@ -93,11 +93,11 @@ TEST_P(GrpcClientIntegrationTest, GrpcStatusFallback) { auto stream = createStream(empty_metadata_); const Http::TestHeaderMapImpl reply_headers{ {":status", "404"}, - {"grpc-status", std::to_string(enumToInt(Status::GrpcStatus::PermissionDenied))}, + {"grpc-status", std::to_string(enumToInt(Status::WellKnownGrpcStatus::PermissionDenied))}, {"grpc-message", "error message"}}; stream->expectInitialMetadata(empty_metadata_); stream->expectTrailingMetadata(empty_metadata_); - stream->expectGrpcStatus(Status::GrpcStatus::PermissionDenied); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::PermissionDenied); stream->fake_stream_->encodeHeaders(reply_headers, true); dispatcher_helper_.runDispatcher(); } @@ -109,7 +109,7 @@ TEST_P(GrpcClientIntegrationTest, HttpReset) { stream->sendServerInitialMetadata(empty_metadata_); dispatcher_helper_.runDispatcher(); stream->expectTrailingMetadata(empty_metadata_); - stream->expectGrpcStatus(Status::GrpcStatus::Internal); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Internal); stream->fake_stream_->encodeResetStream(); dispatcher_helper_.runDispatcher(); } @@ -125,7 +125,7 @@ TEST_P(GrpcClientIntegrationTest, BadReplyGrpcFraming) { stream->sendRequest(); stream->sendServerInitialMetadata(empty_metadata_); stream->expectTrailingMetadata(empty_metadata_); - stream->expectGrpcStatus(Status::GrpcStatus::Internal); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Internal); Buffer::OwnedImpl reply_buffer("\xde\xad\xbe\xef\x00", 5); stream->fake_stream_->encodeData(reply_buffer, true); dispatcher_helper_.runDispatcher(); @@ -138,7 +138,7 @@ TEST_P(GrpcClientIntegrationTest, BadReplyProtobuf) { stream->sendRequest(); stream->sendServerInitialMetadata(empty_metadata_); stream->expectTrailingMetadata(empty_metadata_); - stream->expectGrpcStatus(Status::GrpcStatus::Internal); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Internal); Buffer::OwnedImpl reply_buffer("\x00\x00\x00\x00\x02\xff\xff", 7); stream->fake_stream_->encodeData(reply_buffer, true); dispatcher_helper_.runDispatcher(); @@ -174,7 +174,7 @@ TEST_P(GrpcClientIntegrationTest, OutOfRangeGrpcStatus) { stream->sendReply(); EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); - stream->expectGrpcStatus(Status::GrpcStatus::InvalidCode); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::InvalidCode); const Http::TestHeaderMapImpl reply_trailers{{"grpc-status", std::to_string(0x1337)}}; stream->fake_stream_->encodeTrailers(reply_trailers); dispatcher_helper_.runDispatcher(); @@ -188,7 +188,7 @@ TEST_P(GrpcClientIntegrationTest, MissingGrpcStatus) { stream->sendReply(); EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); - stream->expectGrpcStatus(Status::GrpcStatus::Unknown); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Unknown); const Http::TestHeaderMapImpl reply_trailers{{"some", "other header"}}; stream->fake_stream_->encodeTrailers(reply_trailers); dispatcher_helper_.runDispatcher(); @@ -205,7 +205,7 @@ TEST_P(GrpcClientIntegrationTest, 
ReplyNoTrailers) { EXPECT_CALL(*stream, onReceiveMessage_(HelloworldReplyEq(HELLO_REPLY))).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); stream->expectTrailingMetadata(empty_metadata_); - stream->expectGrpcStatus(Status::GrpcStatus::InvalidCode); + stream->expectGrpcStatus(Status::WellKnownGrpcStatus::InvalidCode); auto serialized_response = Grpc::Common::serializeToGrpcFrame(reply); stream->fake_stream_->encodeData(*serialized_response, true); stream->fake_stream_->encodeResetStream(); @@ -220,7 +220,7 @@ TEST_P(GrpcClientIntegrationTest, StreamClientInitialMetadata) { {Http::LowerCaseString("baz"), "blah"}, }; auto stream = createStream(initial_metadata); - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_, true); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_, true); dispatcher_helper_.runDispatcher(); } @@ -258,7 +258,7 @@ TEST_P(GrpcClientIntegrationTest, ServerInitialMetadata) { }; stream->sendServerInitialMetadata(initial_metadata); stream->sendReply(); - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_); dispatcher_helper_.runDispatcher(); } @@ -273,7 +273,7 @@ TEST_P(GrpcClientIntegrationTest, ServerTrailingMetadata) { {Http::LowerCaseString("foo"), "bar"}, {Http::LowerCaseString("baz"), "blah"}, }; - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", trailing_metadata); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", trailing_metadata); dispatcher_helper_.runDispatcher(); } @@ -281,7 +281,7 @@ TEST_P(GrpcClientIntegrationTest, ServerTrailingMetadata) { TEST_P(GrpcClientIntegrationTest, StreamTrailersOnly) { initialize(); auto stream = createStream(empty_metadata_); - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_, true); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_, true); dispatcher_helper_.runDispatcher(); } @@ -308,7 +308,7 @@ TEST_P(GrpcClientIntegrationTest, ResourceExhaustedError) { stream->sendServerInitialMetadata(empty_metadata_); stream->sendReply(); dispatcher_helper_.runDispatcher(); - stream->sendServerTrailers(Status::GrpcStatus::ResourceExhausted, "error message", + stream->sendServerTrailers(Status::WellKnownGrpcStatus::ResourceExhausted, "error message", empty_metadata_); dispatcher_helper_.runDispatcher(); } @@ -318,18 +318,20 @@ TEST_P(GrpcClientIntegrationTest, UnauthenticatedError) { initialize(); auto stream = createStream(empty_metadata_); stream->sendServerInitialMetadata(empty_metadata_); - stream->sendServerTrailers(Status::GrpcStatus::Unauthenticated, "error message", empty_metadata_); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Unauthenticated, "error message", + empty_metadata_); dispatcher_helper_.runDispatcher(); } // Validate that a trailers reply is still handled even if a grpc status code larger than -// MaximumValid, is handled. -TEST_P(GrpcClientIntegrationTest, MaximumValidPlusOne) { +// MaximumKnown, is handled. 
+TEST_P(GrpcClientIntegrationTest, MaximumKnownPlusOne) { initialize(); auto stream = createStream(empty_metadata_); stream->sendServerInitialMetadata(empty_metadata_); - stream->sendServerTrailers(static_cast(Status::GrpcStatus::MaximumValid + 1), - "error message", empty_metadata_); + stream->sendServerTrailers( + static_cast(Status::WellKnownGrpcStatus::MaximumKnown + 1), + "error message", empty_metadata_); dispatcher_helper_.runDispatcher(); } @@ -340,7 +342,7 @@ TEST_P(GrpcClientIntegrationTest, ReceiveAfterLocalClose) { stream->sendRequest(true); stream->sendServerInitialMetadata(empty_metadata_); stream->sendReply(); - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_); dispatcher_helper_.runDispatcher(); } @@ -458,7 +460,7 @@ TEST_P(GrpcAccessTokenClientIntegrationTest, AccessTokenAuthStream) { stream->sendServerInitialMetadata(empty_metadata_); stream->sendRequest(); stream->sendReply(); - stream->sendServerTrailers(Status::GrpcStatus::Ok, "", empty_metadata_); + stream->sendServerTrailers(Status::WellKnownGrpcStatus::Ok, "", empty_metadata_); dispatcher_helper_.runDispatcher(); } diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index cb9dc9fa3cee..8b37766adb6d 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -148,10 +148,11 @@ class HelloworldStream : public MockAsyncStreamCallbacks } void expectGrpcStatus(Status::GrpcStatus grpc_status) { - if (grpc_status == Status::GrpcStatus::InvalidCode) { + if (grpc_status == Status::WellKnownGrpcStatus::InvalidCode) { EXPECT_CALL(*this, onRemoteClose(_, _)).WillExitIfNeeded(); - } else if (grpc_status > Status::GrpcStatus::MaximumValid) { - EXPECT_CALL(*this, onRemoteClose(Status::GrpcStatus::InvalidCode, _)).WillExitIfNeeded(); + } else if (grpc_status > Status::WellKnownGrpcStatus::MaximumKnown) { + EXPECT_CALL(*this, onRemoteClose(Status::WellKnownGrpcStatus::InvalidCode, _)) + .WillExitIfNeeded(); } else { EXPECT_CALL(*this, onRemoteClose(grpc_status, _)).WillExitIfNeeded(); } diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index a9f96f315aeb..67585504416e 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -457,7 +457,7 @@ TEST(HttpUtility, SendLocalGrpcReply) { EXPECT_EQ(headers.Status()->value().getStringView(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), - std::to_string(enumToInt(Grpc::Status::GrpcStatus::Unknown))); + std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown))); EXPECT_NE(headers.GrpcMessage(), nullptr); EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), "large"); })); @@ -483,7 +483,7 @@ TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { EXPECT_EQ(headers.Status()->value().getStringView(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), - std::to_string(enumToInt(Grpc::Status::GrpcStatus::Unauthenticated))); + std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated))); EXPECT_NE(headers.GrpcMessage(), nullptr); const auto& encoded = Utility::PercentEncoding::encode(json); EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), encoded); @@ -499,7 +499,7 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { 
.WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), - std::to_string(enumToInt(Grpc::Status::GrpcStatus::Unavailable))); + std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable))); })); Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", absl::nullopt, false); @@ -508,12 +508,12 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { .WillOnce(Invoke([&](const HeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), - std::to_string(enumToInt(Grpc::Status::GrpcStatus::ResourceExhausted))); + std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted))); })); - Utility::sendLocalReply( - true, callbacks, false, Http::Code::TooManyRequests, "", - absl::make_optional(Grpc::Status::GrpcStatus::ResourceExhausted), - false); + Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", + absl::make_optional( + Grpc::Status::WellKnownGrpcStatus::ResourceExhausted), + false); } TEST(HttpUtility, SendLocalReplyDestroyedEarly) { diff --git a/test/common/upstream/load_stats_reporter_test.cc b/test/common/upstream/load_stats_reporter_test.cc index 519c4fafc50a..fe22e4a4c356 100644 --- a/test/common/upstream/load_stats_reporter_test.cc +++ b/test/common/upstream/load_stats_reporter_test.cc @@ -223,7 +223,7 @@ TEST_F(LoadStatsReporterTest, RemoteStreamClose) { createLoadStatsReporter(); EXPECT_CALL(*response_timer_, disableTimer()); EXPECT_CALL(*retry_timer_, enableTimer(_, _)); - load_stats_reporter_->onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); + load_stats_reporter_->onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage({}); retry_timer_cb_(); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 89fe617e87a4..832467749e3c 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -74,7 +74,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { auto check_response = std::make_unique(); auto status = check_response->mutable_status(); - status->set_code(Grpc::Status::GrpcStatus::Ok); + status->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); auto authz_response = Response{}; authz_response.status = CheckStatus::OK; @@ -97,8 +97,9 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { const std::string empty_body{}; const auto expected_headers = TestCommon::makeHeaderValueOption({{"foo", "bar", false}}); - auto check_response = TestCommon::makeCheckResponse( - Grpc::Status::GrpcStatus::Ok, envoy::type::StatusCode::OK, empty_body, expected_headers); + auto check_response = + TestCommon::makeCheckResponse(Grpc::Status::WellKnownGrpcStatus::Ok, + envoy::type::StatusCode::OK, empty_body, expected_headers); auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK, Http::Code::OK, empty_body, expected_headers); @@ -121,7 +122,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { auto check_response = std::make_unique(); auto status = check_response->mutable_status(); - status->set_code(Grpc::Status::GrpcStatus::PermissionDenied); + 
status->set_code(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); auto authz_response = Response{}; authz_response.status = CheckStatus::Denied; @@ -145,7 +146,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { auto check_response = std::make_unique(); auto status = check_response->mutable_status(); - status->set_code(Grpc::Status::GrpcStatus::Unknown); + status->set_code(Grpc::Status::WellKnownGrpcStatus::Unknown); auto authz_response = Response{}; authz_response.status = CheckStatus::Denied; @@ -170,9 +171,9 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { const std::string expected_body{"test"}; const auto expected_headers = TestCommon::makeHeaderValueOption({{"foo", "bar", false}, {"foobar", "bar", true}}); - auto check_response = TestCommon::makeCheckResponse(Grpc::Status::GrpcStatus::PermissionDenied, - envoy::type::StatusCode::Unauthorized, - expected_body, expected_headers); + auto check_response = TestCommon::makeCheckResponse( + Grpc::Status::WellKnownGrpcStatus::PermissionDenied, envoy::type::StatusCode::Unauthorized, + expected_body, expected_headers); auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::Denied, Http::Code::Unauthorized, expected_body, expected_headers); diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index a4b1dfe1ea33..50eaf9434ec4 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -18,7 +18,7 @@ CheckResponsePtr TestCommon::makeCheckResponse(Grpc::Status::GrpcStatus response auto status = response->mutable_status(); status->set_code(response_status); - if (response_status != Grpc::Status::GrpcStatus::Ok) { + if (response_status != Grpc::Status::WellKnownGrpcStatus::Ok) { const auto denied_response = response->mutable_denied_response(); if (!body.empty()) { denied_response->set_body(body); diff --git a/test/extensions/filters/common/ext_authz/test_common.h b/test/extensions/filters/common/ext_authz/test_common.h index 17d75d7e89f3..2ac2d0e7331d 100644 --- a/test/extensions/filters/common/ext_authz/test_common.h +++ b/test/extensions/filters/common/ext_authz/test_common.h @@ -29,11 +29,11 @@ class TestCommon { static Http::MessagePtr makeMessageResponse(const HeaderValueOptionVector& headers, const std::string& body = std::string{}); - static CheckResponsePtr - makeCheckResponse(Grpc::Status::GrpcStatus response_status = Grpc::Status::GrpcStatus::Ok, - envoy::type::StatusCode http_status_code = envoy::type::StatusCode::OK, - const std::string& body = std::string{}, - const HeaderValueOptionVector& headers = HeaderValueOptionVector{}); + static CheckResponsePtr makeCheckResponse( + Grpc::Status::GrpcStatus response_status = Grpc::Status::WellKnownGrpcStatus::Ok, + envoy::type::StatusCode http_status_code = envoy::type::StatusCode::OK, + const std::string& body = std::string{}, + const HeaderValueOptionVector& headers = HeaderValueOptionVector{}); static Response makeAuthzResponse(CheckStatus status, Http::Code status_code = Http::Code::OK, diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 548ee0f33fbd..56e7b33be962 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -55,7 +55,8 @@ TEST_P(AdsIntegrationTest, Failure) { EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); 
EXPECT_TRUE(compareDiscoveryRequest( - Config::TypeUrl::get().Cluster, "", {}, {}, {}, true, Grpc::Status::GrpcStatus::Internal, + Config::TypeUrl::get().Cluster, "", {}, {}, {}, true, + Grpc::Status::WellKnownGrpcStatus::Internal, fmt::format("does not match the message-wide type URL {}", Config::TypeUrl::get().Cluster))); sendDiscoveryResponse(Config::TypeUrl::get().Cluster, {buildCluster("cluster_0")}, @@ -70,7 +71,7 @@ TEST_P(AdsIntegrationTest, Failure) { EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", {"cluster_0"}, {}, {}, true, - Grpc::Status::GrpcStatus::Internal, + Grpc::Status::WellKnownGrpcStatus::Internal, fmt::format("does not match the message-wide type URL {}", Config::TypeUrl::get().ClusterLoadAssignment))); sendDiscoveryResponse( @@ -84,7 +85,8 @@ TEST_P(AdsIntegrationTest, Failure) { {buildRouteConfig("listener_0", "route_config_0")}, {}, "1"); EXPECT_TRUE(compareDiscoveryRequest( - Config::TypeUrl::get().Listener, "", {}, {}, {}, true, Grpc::Status::GrpcStatus::Internal, + Config::TypeUrl::get().Listener, "", {}, {}, {}, true, + Grpc::Status::WellKnownGrpcStatus::Internal, fmt::format("does not match the message-wide type URL {}", Config::TypeUrl::get().Listener))); sendDiscoveryResponse( Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, @@ -99,7 +101,7 @@ TEST_P(AdsIntegrationTest, Failure) { EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", {"route_config_0"}, {}, {}, true, - Grpc::Status::GrpcStatus::Internal, + Grpc::Status::WellKnownGrpcStatus::Internal, fmt::format("does not match the message-wide type URL {}", Config::TypeUrl::get().RouteConfiguration))); sendDiscoveryResponse( diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 6d02fe346c22..2d6f441d3ff3 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -672,7 +672,7 @@ AssertionResult BaseIntegrationTest::compareDeltaDiscoveryRequest( request.error_detail().code(), expected_error_code, request.error_detail().message()); } - if (expected_error_code != Grpc::Status::GrpcStatus::Ok && + if (expected_error_code != Grpc::Status::WellKnownGrpcStatus::Ok && request.error_detail().message().find(expected_error_substring) == std::string::npos) { return AssertionFailure() << "\"" << expected_error_substring << "\" is not a substring of actual error message \"" diff --git a/test/integration/integration.h b/test/integration/integration.h index 3cdabb86abfb..5b4d476ba182 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -222,14 +222,13 @@ class BaseIntegrationTest : Logger::Loggable { // available if you're writing a SotW/delta-specific test. // TODO(fredlas) expect_node was defaulting false here; the delta+SotW unification work restores // it. 
- AssertionResult - compareDiscoveryRequest(const std::string& expected_type_url, const std::string& expected_version, - const std::vector& expected_resource_names, - const std::vector& expected_resource_names_added, - const std::vector& expected_resource_names_removed, - bool expect_node = true, - const Protobuf::int32 expected_error_code = Grpc::Status::GrpcStatus::Ok, - const std::string& expected_error_message = ""); + AssertionResult compareDiscoveryRequest( + const std::string& expected_type_url, const std::string& expected_version, + const std::vector& expected_resource_names, + const std::vector& expected_resource_names_added, + const std::vector& expected_resource_names_removed, bool expect_node = true, + const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok, + const std::string& expected_error_message = ""); template void sendDiscoveryResponse(const std::string& type_url, const std::vector& state_of_the_world, const std::vector& added_or_updated, @@ -245,7 +244,7 @@ class BaseIntegrationTest : Logger::Loggable { const std::string& expected_type_url, const std::vector& expected_resource_subscriptions, const std::vector& expected_resource_unsubscriptions, - const Protobuf::int32 expected_error_code = Grpc::Status::GrpcStatus::Ok, + const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok, const std::string& expected_error_message = "") { return compareDeltaDiscoveryRequest(expected_type_url, expected_resource_subscriptions, expected_resource_unsubscriptions, xds_stream_, @@ -256,7 +255,7 @@ class BaseIntegrationTest : Logger::Loggable { const std::string& expected_type_url, const std::vector& expected_resource_subscriptions, const std::vector& expected_resource_unsubscriptions, FakeStreamPtr& stream, - const Protobuf::int32 expected_error_code = Grpc::Status::GrpcStatus::Ok, + const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok, const std::string& expected_error_message = ""); // TODO(fredlas) expect_node was defaulting false here; the delta+SotW unification work restores @@ -264,7 +263,7 @@ class BaseIntegrationTest : Logger::Loggable { AssertionResult compareSotwDiscoveryRequest( const std::string& expected_type_url, const std::string& expected_version, const std::vector& expected_resource_names, bool expect_node = true, - const Protobuf::int32 expected_error_code = Grpc::Status::GrpcStatus::Ok, + const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok, const std::string& expected_error_message = ""); template From a063ef8ee2cd81d369a8d2e937f8397b70066800 Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 11 Nov 2019 15:57:28 -0500 Subject: [PATCH 13/14] security: postmortems for CVE-2019-1522[56] (#8723) Signed-off-by: Asra Ali --- security/postmortems/cve-2019-15225.md | 228 +++++++++++++++++++++++++ security/postmortems/cve-2019-15226.md | 1 + tools/check_format.py | 3 +- 3 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 security/postmortems/cve-2019-15225.md create mode 100644 security/postmortems/cve-2019-15226.md diff --git a/security/postmortems/cve-2019-15225.md b/security/postmortems/cve-2019-15225.md new file mode 100644 index 000000000000..1fbd02727822 --- /dev/null +++ b/security/postmortems/cve-2019-15225.md @@ -0,0 +1,228 @@ +# Security postmortem for CVE-2019-15225, CVE-2019-15226 + +## Incident date(s) + +2019-07-25 - 2019-10-10 + +## Authors + +@asraa + +## Status + +Final + +## Summary + +After an Envoy user publicly reported a crash in 
Envoy about regular expression matching in route +resolution (https://github.com/envoyproxy/envoy/issues/7728), the Envoy security team found that the +issue could be leveraged for a DoS attack and decided it would go through the public security release +process. The fix landed in master with a public PR (https://github.com/envoyproxy/envoy/pull/7878) +and was targeted to be included in a 1.11.2 security release. + +CVE-2019-15226 was detected via fuzzers just after the 1.11.1 security release. With the fix of +CVE-2019-15225 in progress, the Envoy security team decided to lump the two fixes into a 1.11.2 +security release. This was the first time that an Envoy security release included a publicly +disclosed vulnerability whose fix had already been merged into master. The security release included a +backported patch of the fix as well as the patches for CVE-2019-15226. + +## CVE issue(s) + +* https://github.com/envoyproxy/envoy/issues/8519 +* https://github.com/envoyproxy/envoy/issues/8520 + +## Root Causes + +CVE-2019-15225 was caused by the use of a recursive algorithm for matching regular +expressions. Envoy’s HTTP router can be configured with regular expressions for routing incoming +HTTP requests by matching header values. Envoy used the libstdc++ `std::regex` implementation for +these regular expressions. As a result, an HTTP request with sufficiently large header values may +consume large amounts of stack memory and cause abnormal process termination. Regular expressions +with the `*` or `+` quantifiers are particularly vulnerable; in practice, the crash appeared when +matching header values of 16 KB or more. + +CVE-2019-15226 resulted from excessive iteration over the `HeaderMap` caused by a time-consuming header +size validation that occurred for each header added. Both codec libraries, http_parser and nghttp2, +have internal limits for the maximum request header size. Envoy’s HTTP/2 codec originally checked +against a hard-coded max header size of 63K, which was just under the default max headers length in +nghttp2. The check occurred every time a header was added, resulting in O(n^2) performance. Work on +making this limit configurable (https://github.com/envoyproxy/envoy/issues/5626) also introduced the +issue in Envoy’s HTTP/1 codec, where the check was added per header field, mimicking the same +problematic pattern as the original HTTP/2 codec. + +## Resolution + +To resolve the excessive memory consumption caused by regex matching, Envoy +1.11.2 deprecates the use of `std::regex` in user-facing paths. A new safe regex matcher introduces +an explicitly configurable regex engine. Currently, the only supported engine is Google’s RE2, +which implements a safe subset of the `std::regex` language features. The existing regex +engine is in a deprecation period to allow users to switch to safe regex engines. + +Google’s RE2 regex engine is designed to complete execution in linear time +(https://github.com/google/re2/wiki/WhyRE2) and to limit the amount of memory used. Envoy 1.11.2 also +includes an option to configure a “program size” when using Google RE2, a rough estimate of how +complex a compiled regex is to evaluate. A regex whose program size is greater than this value +will fail to compile (see the illustrative sketch below). + +CVE-2019-15226 was first noticed via fuzzers when a timeout was reported by +`h1_capture_direct_fuzz_test`: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16325 on +2019-08-09.
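To make the “program size” guard concrete, the following is a small illustrative sketch, not Envoy's actual matcher code: it compiles a pattern with RE2's public C++ API and rejects it when `ProgramSize()` exceeds a hypothetical bound (`kMaxProgramSize` here stands in for the configurable limit described above).

```cpp
#include <iostream>
#include <string>

#include "re2/re2.h"

// Hypothetical bound standing in for the configurable "program size" limit;
// the real limit is part of the Envoy matcher configuration.
constexpr int kMaxProgramSize = 100;

// Compiles a pattern with RE2 and rejects it if it is invalid or if its
// compiled program size exceeds the bound.
bool acceptRegex(const std::string& pattern) {
  re2::RE2 re(pattern, re2::RE2::Quiet); // Quiet: do not log compile errors.
  if (!re.ok()) {
    std::cout << "rejected (invalid): " << re.error() << "\n";
    return false;
  }
  if (re.ProgramSize() > kMaxProgramSize) {
    std::cout << "rejected (program size " << re.ProgramSize() << " > " << kMaxProgramSize << ")\n";
    return false;
  }
  std::cout << "accepted (program size " << re.ProgramSize() << ")\n";
  return true;
}

int main() {
  // Actual program sizes depend on the RE2 version, so the second pattern may
  // or may not exceed the example bound.
  acceptRegex("role=(master|replica)");
  acceptRegex("([a-z0-9._-]{1,64})(alpha|beta|gamma|delta)([A-Z]{2,8})(foo|bar|baz|qux)+");
  return 0;
}
```

In Envoy itself the corresponding check runs when the configuration is loaded, so an over-complex pattern is rejected before it can ever be evaluated against request data.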
Once a reproducer was made in an Envoy deployment to confirm the issue and some +profiling work was done by the Envoy security team, we moved to a private fix process targeting the +1.11.2 release along with CVE-2019-15225. Other call sites of `byteSize()` and other iterations over +`HeaderMap` were also analyzed for potential DoS vulnerabilities and performance issues. + +The fix re-implemented the `HeaderMapImpl::byteSize()` method to have O(1) performance by returning +a `cached_byte_size_` value stored in `HeaderMapImpl` that is updated as header entries are added, rather +than iterating over the `HeaderMap` to calculate the byte size (a short illustrative sketch of this pattern follows the Detection section below). To resolve excessive iterations over +the `HeaderMap` that can appear in access logging with many header formatters and many headers, the +fix also included configurable limits for the maximum number of headers. + +The following patches were produced: +* https://github.com/envoyproxy/envoy/commit/afc39bea36fd436e54262f150c009e8d72db5014 +* https://github.com/envoyproxy/envoy/commit/5c122a35ebd7d3f7678b0f1c9846c1e282bba079 + +A 1.11.2 security release was announced on 2019-09-18. An e-mail was sent to the Envoy private +distributor list sharing the details of CVE-2019-15226. A week later, the candidate fix patches for +CVE-2019-15226 were shared with distributors on 2019-09-24. This provided two weeks for distributors +to test and prepare their software for the security release date, as per the guidelines set in place +after security release 1.9.1. + +## Detection + +CVE-2019-15225 was reported by Seikun Kambashi in a public GitHub issue describing a crash caused by +a request with a very large URI for routes configured with a regex matcher: +https://github.com/envoyproxy/envoy/issues/7728. + +Envoy’s `route_fuzz_test`, which fuzzes route resolution and header finalization, ideally should +have caught this crash. The test takes a `RouteConfiguration` and a set of headers as inputs, and +routes a request with the input headers against the given `RouteConfiguration`. It should have been +fairly easy for the fuzzers to produce a wildcard matcher and a long header string. However, the +fuzz test itself had a logical error that resulted in ignoring input path headers and setting them +to a default value of “/”. As a result, the fuzz test would never have tested a large URI, and an OOM +or crash would never have been detected. The fuzz test was fixed in +https://github.com/envoyproxy/envoy/pull/8653, and a reproducer for the CVE was added. + +The underlying issue behind CVE-2019-15226 was first noticed via fuzzers when a timeout was reported +by `h1_capture_direct_fuzz_test`: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16325. Some +profiling work revealed that `HeaderMapImpl::byteSize()`, which is O(n) in the number of headers, is +called for every single header in both the HTTP/1.1 and HTTP/2 codecs. Although Envoy’s stateless HTTP/2 +header fuzzers (`request_header_fuzz_test` and `response_header_fuzz_test`) perform 10x more +executions per second than this fuzzer, they tested one header frame per testcase and used +nghttp2’s default max header frame size (16 KB). Because of this, the frame size was too small to +amplify the effect of the O(n^2) behavior enough to produce a timeout.
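For clarity, here is a minimal, self-contained sketch of the pattern described in the resolution above. It is not Envoy's actual `HeaderMapImpl`; the class and constant names are illustrative only. It contrasts the original approach (recomputing the byte size on every header addition, O(n^2) overall) with the cached-size approach (O(1) per check).

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// A minimal stand-in for a header map; this is NOT Envoy's HeaderMapImpl,
// just an illustration of the performance pattern described above.
class SimpleHeaderMap {
public:
  void addCopy(const std::string& key, const std::string& value) {
    headers_.emplace_back(key, value);
    // O(1) fix: maintain the byte size incrementally on every insertion.
    cached_byte_size_ += key.size() + value.size();
  }

  // The problematic pattern: an O(n) walk over every header. Calling this once
  // per added header makes the total validation cost O(n^2).
  uint64_t byteSizeRecomputed() const {
    uint64_t total = 0;
    for (const auto& header : headers_) {
      total += header.first.size() + header.second.size();
    }
    return total;
  }

  // The fixed pattern: O(1) read of the incrementally maintained size.
  uint64_t byteSize() const { return cached_byte_size_; }

private:
  std::vector<std::pair<std::string, std::string>> headers_;
  uint64_t cached_byte_size_{0};
};

int main() {
  // Mirrors the old hard-coded 63K limit mentioned in the root cause analysis.
  constexpr uint64_t kMaxHeadersBytes = 63 * 1024;

  SimpleHeaderMap headers;
  for (int i = 0; i < 1000; ++i) {
    headers.addCopy("x-key-" + std::to_string(i), std::string(100, 'a'));
    // Per-header size validation: with byteSizeRecomputed() this loop would be
    // O(n^2); with the cached byteSize() it stays O(n) overall.
    if (headers.byteSize() > kMaxHeadersBytes) {
      std::cout << "rejecting request: headers exceed " << kMaxHeadersBytes << " bytes\n";
      break;
    }
  }
  std::cout << "final header bytes: " << headers.byteSize() << "\n";
  return 0;
}
```

With the recomputed variant, adding n headers costs roughly n^2/2 string-length traversals; with the cached variant it is n constant-time checks.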
+ +## Action Items + +* https://github.com/envoyproxy/envoy/issues/8567 + +* https://github.com/envoyproxy/envoy/issues/8875 + +* https://github.com/envoyproxy/envoy/issues/8898 + +* https://github.com/envoyproxy/envoy/issues/8901 + +## Lessons Learned + +### What went well + +* CVE-2019-15226 was detected quickly after the fuzzer reported the timeout. + +* The fixes for CVE-2019-15226 were straightforward and localized. + +* The security release occurred on time and followed the guidelines established in + https://github.com/envoyproxy/envoy/blob/master/SECURITY.md + +### What went wrong + +* It took nearly a week to set up a branch for fix patches. This was due to some confusion over + whether to use the new GitHub Security advisories, which didn’t support the required permission + model and CI integrations. In the process, the envoy-setec branch was temporarily made readable to + all Envoy contributors. + +* While resolving the above permission issue, we hit an issue with GitHub permissions on envoyproxy: + people could no longer assign issues to members in the Envoy repository. This was fixed with some + restructuring of GitHub teams to support the limited GitHub IAM model. + +* The fix team was able to push directly to envoy-setec branches, e.g. the 1.11.2 branch (and master + as well) could be pushed to directly. We need branch protection to ensure that CI gates merges; this will + provide confidence that the staged release branches are likely to work on the main Envoy + repository. + +* We had manual patch sets the day before release, but no envoy-setec branches reflecting them + passing end-to-end. We should not consider a release ready to go until it passes full CI. + +* It wasn’t possible to get a full CI pass due to docs/image/etc push issues. We should have a set + of presubmits that provide a simple yes/no in the GitHub UX. + +* Our route resolution fuzzer would not have picked up the regex vulnerability due to a logical + error in the fuzzer. + +* Our more efficient request and response fuzzers would not have picked up this vulnerability + earlier. They only fuzz a single HEADER frame, and the maximum frame size for HTTP/2 is by default + 16 KB. + +* From a distributor: “We didn't realize about safe_regex until the note this morning. So we're + patching ... to switch to safe_regex -- would it be possible in future notes to distributors to + note if usage changes are required?” + +* We coupled the CVE-2019-15225 and CVE-2019-15226 releases. This made sense initially, due to + release overhead, but as the release date for the header map fixes was extended, it meant that a + somewhat known vulnerability was fixed on master but not on any released version of Envoy. + + +### Where we got lucky + +* Release branches (master and v1.12.2) had only minor CI failures + (bazel.compile_time_options), despite there being no complete CI pass of either assembled branch on the + private fix branch. + +## Timeline + +All times US/Pacific + +2019-07-25: +* [CVE-2019-15225] https://github.com/envoyproxy/envoy/issues/7728 was opened reporting crashes from + route regex matches with very long request URIs + +2019-08-09: +* [CVE-2019-15226] ClusterFuzz reports https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16325 + under embargo. + +2019-08-13: +* [CVE-2019-15226] Email thread to envoy-security@googlegroups.com regarding the HeaderMap + DoS. Analysis began to determine similar O(n^2) performance in code that uses HeaderMap.
+ +2019-08-19: +* CVE ID Request for CVE-2019-15225 and CVE-2019-15226 + +2019-08-20: +* Branch permissions / CI permissions for envoy-setec branch set up. + +2019-08-21: +* [CVE-2019-15226] Draft fix PRs for CVE-2019-15226 were shared on private Envoy security + repository. Reviews and further development occurred over the next three weeks. + +2019-08-23: +* [CVE-2019-15225] Fix for CVE-2019-15225 is opened at https://github.com/envoyproxy/envoy/pull/7878 + +2019-09-18: +* [CVE-2019-15226] CVE summary details shared with cncf-envoy-distributors-announce@lists.cncf.io. + +2019-09-19: +* [CVE-2019-15226] Vulnerability exists in all Envoy distributions for HTTP/2 traffic, CVE updated + +2019-09-24: +* [CVE-2019-15226] Candidate fix patches were shared with + cncf-envoy-distributors-announce@lists.cncf.io. + +2019-10-07: +* [CVE-2019-15226] Patch sets assembled based on previous reviewed work as manual patches. + +2019-10-08: +* [CVE-2019-15226] Some last-minute patch fixups to have staged branches pass on CI. Patches + scheduled for public release. +* 11:20 AM v1.11.2 pushed +* [CVE-2019-15226] CVE updated on publication + +2019-10-10: +* [CVE-2019-15226] Filed follow-up GitHub issue https://github.com/envoyproxy/envoy/issues/8567 diff --git a/security/postmortems/cve-2019-15226.md b/security/postmortems/cve-2019-15226.md new file mode 100644 index 000000000000..12ab87df7874 --- /dev/null +++ b/security/postmortems/cve-2019-15226.md @@ -0,0 +1 @@ +See [cve-2019-15225.md](cve-2019-15225.md) diff --git a/tools/check_format.py b/tools/check_format.py index 73a49b0f88e0..d1d6171aebb4 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -297,7 +297,8 @@ def whitelistedForHistogramSiSuffix(name): def whitelistedForStdRegex(file_path): - return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST + return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST or file_path.endswith( + DOCS_SUFFIX) def whitelistedForGrpcInit(file_path): From 90d1094b32aa017f90cc8efcd379aeb143acabfc Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 11 Nov 2019 17:03:21 -0500 Subject: [PATCH 14/14] security: document threat model. (#8906) * Add an explicit threat model to the end user facing docs, link to this from SECURITY.md * Switch all Envoy extensions to use a new macro `envoy_cc_extension`, mandating that extensions declare a security posture. Extensions can also optionally declare `alpha` or `wip` status. * Tag all documentation sites with their well-known Envoy names. * Introduce tooling to automagically populate a list of known trusted/untrusted extensions in the threat model docs. * Generate API docs for extensions that depend on `google.protobuf.Empty`. This pattern is deprecated as per https://github.com/envoyproxy/envoy/issues/8933, but we need these for tooling support meanwhile.
This work was motivated by oss-fuzz issue https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=18370 Signed-off-by: Harvey Tuch --- SECURITY.md | 28 +----- api/docs/BUILD | 3 + api/envoy/config/accesslog/v2/als.proto | 2 + api/envoy/config/accesslog/v2/file.proto | 1 + api/envoy/config/accesslog/v3alpha/als.proto | 2 + api/envoy/config/accesslog/v3alpha/file.proto | 1 + .../v2alpha/cluster.proto | 1 + .../v3alpha/cluster.proto | 1 + .../config/cluster/redis/redis_cluster.proto | 1 + .../v2alpha/adaptive_concurrency.proto | 1 + .../v3alpha/adaptive_concurrency.proto | 1 + .../config/filter/http/buffer/v2/buffer.proto | 1 + .../config/filter/http/csrf/v2/csrf.proto | 1 + .../filter/http/csrf/v3alpha/csrf.proto | 1 + .../v2alpha/dynamic_forward_proxy.proto | 1 + .../v3alpha/dynamic_forward_proxy.proto | 1 + .../filter/http/ext_authz/v2/ext_authz.proto | 1 + .../http/ext_authz/v3alpha/ext_authz.proto | 1 + .../config/filter/http/fault/v2/fault.proto | 1 + .../filter/http/fault/v3alpha/fault.proto | 1 + .../v2alpha1/config.proto | 1 + .../http/grpc_stats/v2alpha/config.proto | 1 + .../config/filter/http/gzip/v2/gzip.proto | 1 + .../v2/header_to_metadata.proto | 1 + .../http/health_check/v2/health_check.proto | 1 + .../health_check/v3alpha/health_check.proto | 1 + .../http/ip_tagging/v2/ip_tagging.proto | 1 + .../http/ip_tagging/v3alpha/ip_tagging.proto | 1 + .../http/jwt_authn/v2alpha/config.proto | 1 + .../http/jwt_authn/v3alpha/config.proto | 1 + api/envoy/config/filter/http/lua/v2/lua.proto | 1 + .../original_src/v2alpha1/original_src.proto | 1 + .../http/rate_limit/v2/rate_limit.proto | 1 + .../http/rate_limit/v3alpha/rate_limit.proto | 1 + .../config/filter/http/rbac/v2/rbac.proto | 1 + .../filter/http/rbac/v3alpha/rbac.proto | 1 + .../config/filter/http/router/v2/router.proto | 1 + .../filter/http/router/v3alpha/router.proto | 1 + .../config/filter/http/squash/v2/squash.proto | 1 + .../config/filter/http/tap/v2alpha/tap.proto | 1 + .../config/filter/http/tap/v3alpha/tap.proto | 1 + .../http/transcoder/v2/transcoder.proto | 1 + .../original_src/v2alpha1/original_src.proto | 1 + .../client_ssl_auth/v2/client_ssl_auth.proto | 1 + .../v3alpha/client_ssl_auth.proto | 1 + .../dubbo_proxy/v2alpha1/dubbo_proxy.proto | 1 + .../dubbo_proxy/v3alpha/dubbo_proxy.proto | 1 + .../network/ext_authz/v2/ext_authz.proto | 1 + .../network/ext_authz/v3alpha/ext_authz.proto | 1 + .../v2/http_connection_manager.proto | 1 + .../v3alpha/http_connection_manager.proto | 1 + .../network/mongo_proxy/v2/mongo_proxy.proto | 1 + .../mongo_proxy/v3alpha/mongo_proxy.proto | 1 + .../mysql_proxy/v1alpha1/mysql_proxy.proto | 3 +- .../network/rate_limit/v2/rate_limit.proto | 1 + .../rate_limit/v3alpha/rate_limit.proto | 1 + .../config/filter/network/rbac/v2/rbac.proto | 1 + .../filter/network/rbac/v3alpha/rbac.proto | 1 + .../network/redis_proxy/v2/redis_proxy.proto | 1 + .../redis_proxy/v3alpha/redis_proxy.proto | 1 + .../network/tcp_proxy/v2/tcp_proxy.proto | 1 + .../network/tcp_proxy/v3alpha/tcp_proxy.proto | 1 + .../thrift_proxy/v2alpha1/thrift_proxy.proto | 1 + .../thrift_proxy/v3alpha/thrift_proxy.proto | 1 + .../v1alpha1/zookeeper_proxy.proto | 6 +- .../rate_limit/v2alpha1/rate_limit.proto | 1 + .../rate_limit/v3alpha/rate_limit.proto | 1 + .../thrift/router/v2alpha1/router.proto | 1 + .../grpc_credential/v2alpha/aws_iam.proto | 1 + .../v2alpha/file_based_metadata.proto | 1 + .../grpc_credential/v3alpha/aws_iam.proto | 1 + .../v3alpha/file_based_metadata.proto | 1 + .../health_checker/redis/v2/redis.proto | 1 + 
.../config/metrics/v2/metrics_service.proto | 1 + api/envoy/config/metrics/v2/stats.proto | 3 + .../metrics/v3alpha/metrics_service.proto | 1 + api/envoy/config/metrics/v3alpha/stats.proto | 3 + .../fixed_heap/v2alpha/fixed_heap.proto | 1 + .../v2alpha/injected_resource.proto | 1 + .../previous_priorities_config.proto | 1 + api/envoy/config/trace/v2/trace.proto | 5 + api/envoy/config/trace/v3alpha/trace.proto | 5 + .../transport_socket/alts/v2alpha/alts.proto | 1 + .../transport_socket/tap/v2alpha/tap.proto | 1 + .../transport_socket/tap/v3alpha/tap.proto | 1 + bazel/envoy_build_system.bzl | 2 + bazel/envoy_library.bzl | 47 +++++++++ docs/build.sh | 19 +++- docs/empty_extensions.json | 66 +++++++++++++ docs/generate_extension_db.py | 57 +++++++++++ docs/generate_extension_rst.py | 36 +++++++ docs/root/api-v2/config/config.rst | 1 + docs/root/api-v2/config/filter/http/http.rst | 1 + .../config/filter/listener/listener.rst | 1 + .../api-v2/config/filter/network/network.rst | 2 + docs/root/api-v2/config/retry/retry.rst | 9 ++ .../intro/arch_overview/security/security.rst | 1 + .../arch_overview/security/threat_model.rst | 97 +++++++++++++++++++ source/extensions/access_loggers/file/BUILD | 4 +- source/extensions/access_loggers/grpc/BUILD | 7 +- .../clusters/dynamic_forward_proxy/BUILD | 5 +- source/extensions/clusters/redis/BUILD | 4 +- source/extensions/common/crypto/BUILD | 6 +- source/extensions/extensions_build_config.bzl | 6 +- .../filters/http/adaptive_concurrency/BUILD | 5 +- source/extensions/filters/http/buffer/BUILD | 4 +- source/extensions/filters/http/cors/BUILD | 4 +- source/extensions/filters/http/csrf/BUILD | 4 +- .../filters/http/dynamic_forward_proxy/BUILD | 5 +- source/extensions/filters/http/dynamo/BUILD | 4 +- .../extensions/filters/http/ext_authz/BUILD | 4 +- source/extensions/filters/http/fault/BUILD | 4 +- .../filters/http/grpc_http1_bridge/BUILD | 4 +- .../http/grpc_http1_reverse_bridge/BUILD | 5 +- .../filters/http/grpc_json_transcoder/BUILD | 4 +- .../extensions/filters/http/grpc_stats/BUILD | 6 +- source/extensions/filters/http/grpc_web/BUILD | 4 +- source/extensions/filters/http/gzip/BUILD | 4 +- .../filters/http/header_to_metadata/BUILD | 4 +- .../filters/http/health_check/BUILD | 4 +- .../extensions/filters/http/ip_tagging/BUILD | 4 +- .../extensions/filters/http/jwt_authn/BUILD | 5 +- source/extensions/filters/http/lua/BUILD | 4 +- .../filters/http/original_src/BUILD | 5 +- .../extensions/filters/http/ratelimit/BUILD | 4 +- source/extensions/filters/http/rbac/BUILD | 4 +- source/extensions/filters/http/router/BUILD | 5 +- source/extensions/filters/http/squash/BUILD | 4 +- source/extensions/filters/http/tap/BUILD | 5 +- .../filters/listener/http_inspector/BUILD | 4 +- .../filters/listener/original_dst/BUILD | 4 +- .../filters/listener/original_src/BUILD | 5 +- .../filters/listener/proxy_protocol/BUILD | 4 +- .../filters/listener/tls_inspector/BUILD | 4 +- .../filters/network/client_ssl_auth/BUILD | 4 +- .../filters/network/dubbo_proxy/BUILD | 5 +- source/extensions/filters/network/echo/BUILD | 4 +- .../filters/network/ext_authz/BUILD | 4 +- .../network/http_connection_manager/BUILD | 5 +- source/extensions/filters/network/kafka/BUILD | 5 +- .../filters/network/mongo_proxy/BUILD | 4 +- .../filters/network/mysql_proxy/BUILD | 5 +- .../filters/network/ratelimit/BUILD | 4 +- source/extensions/filters/network/rbac/BUILD | 4 +- .../filters/network/redis_proxy/BUILD | 4 +- .../filters/network/sni_cluster/BUILD | 4 +- .../filters/network/tcp_proxy/BUILD | 5 +- 
.../filters/network/thrift_proxy/BUILD | 5 +- .../thrift_proxy/filters/ratelimit/BUILD | 5 +- .../filters/network/thrift_proxy/router/BUILD | 5 +- .../filters/network/zookeeper_proxy/BUILD | 5 +- .../extensions/grpc_credentials/aws_iam/BUILD | 6 +- .../file_based_metadata/BUILD | 6 +- source/extensions/health_checkers/redis/BUILD | 4 +- .../resource_monitors/fixed_heap/BUILD | 5 +- .../resource_monitors/injected_resource/BUILD | 5 +- .../retry/host/omit_canary_hosts/BUILD | 4 +- .../retry/host/previous_hosts/BUILD | 4 +- .../retry/priority/previous_priorities/BUILD | 4 +- source/extensions/stat_sinks/dog_statsd/BUILD | 5 +- source/extensions/stat_sinks/hystrix/BUILD | 4 +- .../stat_sinks/metrics_service/BUILD | 4 +- source/extensions/stat_sinks/statsd/BUILD | 5 +- source/extensions/tracers/datadog/BUILD | 4 +- source/extensions/tracers/dynamic_ot/BUILD | 4 +- source/extensions/tracers/lightstep/BUILD | 4 +- source/extensions/tracers/opencensus/BUILD | 4 +- source/extensions/tracers/xray/BUILD | 5 +- source/extensions/tracers/zipkin/BUILD | 4 +- .../extensions/transport_sockets/alts/BUILD | 4 +- source/extensions/transport_sockets/tap/BUILD | 5 +- source/extensions/transport_sockets/tls/BUILD | 4 +- tools/api_proto_plugin/annotations.py | 6 ++ tools/protodoc/BUILD | 8 ++ tools/protodoc/generate_empty.py | 48 +++++++++ tools/protodoc/protodoc.py | 79 ++++++++++++++- 176 files changed, 830 insertions(+), 118 deletions(-) create mode 100644 docs/empty_extensions.json create mode 100755 docs/generate_extension_db.py create mode 100755 docs/generate_extension_rst.py create mode 100644 docs/root/api-v2/config/retry/retry.rst create mode 100644 docs/root/intro/arch_overview/security/threat_model.rst create mode 100644 tools/protodoc/generate_empty.py diff --git a/SECURITY.md b/SECURITY.md index 883b3c3b067b..42ec10e584b2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -86,29 +86,11 @@ detect issues during their execution on ClusterFuzz. A soak period of 5 days pro guarantee, since we will invoke the security release process for medium or higher severity issues for these older bugs. -### Confidentiality, integrity and availability - -We consider vulnerabilities leading to the compromise of data confidentiality or integrity to be our -highest priority concerns. Availability, in particular in areas relating to DoS and resource -exhaustion, is also a serious security concern for Envoy operators, in particular those utilizing -Envoy in edge deployments. - -The Envoy availability stance around CPU and memory DoS, as well as Query-of-Death (QoD), is still -evolving. We will continue to iterate and fix well known resource issues in the open, e.g. overload -manager and watermark improvements. We will activate the security process for disclosures that -appear to present a risk profile that is significantly greater than the current Envoy availability -hardening status quo. Examples of disclosures that would elicit this response: -* QoD; where a single query from a client can bring down an Envoy server. -* Highly asymmetric resource exhaustion attacks, where very little traffic can cause resource - exhaustion, e.g. that delivered by a single client. - -Note that we do not currently consider the default settings for Envoy to be safe from an availability -perspective. It is necessary for operators to explicitly configure watermarks, the overload manager, -circuit breakers and other resource related features in Envoy to provide a robust availability -story. 
We will not act on any security disclosure that relates to a lack of safe defaults. Over -time, we will work towards improved safe-by-default configuration, but due to backwards -compatibility and performance concerns, this will require following the breaking change deprecation -policy. +### Threat model + +See https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/threat_model. +Vulnerabilities are evaluated against this threat model when deciding whether to activate the Envoy +security release process. ### Fix Team Organization diff --git a/api/docs/BUILD b/api/docs/BUILD index 806ea72bab14..3292a1212c38 100644 --- a/api/docs/BUILD +++ b/api/docs/BUILD @@ -56,11 +56,13 @@ proto_library( "//envoy/config/filter/network/ext_authz/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", "//envoy/config/filter/network/mongo_proxy/v2:pkg", + "//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg", "//envoy/config/filter/network/rate_limit/v2:pkg", "//envoy/config/filter/network/rbac/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", + "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", @@ -72,6 +74,7 @@ proto_library( "//envoy/config/rbac/v2:pkg", "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", + "//envoy/config/retry/previous_priorities:pkg", "//envoy/config/trace/v2:pkg", "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto index 4f77fcaa4cba..a45d0ca52052 100644 --- a/api/envoy/config/accesslog/v2/als.proto +++ b/api/envoy/config/accesslog/v2/als.proto @@ -19,6 +19,7 @@ import "validate/validate.proto"; // :ref:`AccessLog `. This configuration will // populate :ref:`StreamAccessLogsMessage.http_logs // `. +// [#extension: envoy.access_loggers.http_grpc] message HttpGrpcAccessLogConfig { CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; @@ -37,6 +38,7 @@ message HttpGrpcAccessLogConfig { // Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will // populate *StreamAccessLogsMessage.tcp_logs*. +// [#extension: envoy.access_loggers.tcp_grpc] message TcpGrpcAccessLogConfig { CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/accesslog/v2/file.proto b/api/envoy/config/accesslog/v2/file.proto index 9ed71469882b..16a49563ffc9 100644 --- a/api/envoy/config/accesslog/v2/file.proto +++ b/api/envoy/config/accesslog/v2/file.proto @@ -11,6 +11,7 @@ import "google/protobuf/struct.proto"; import "validate/validate.proto"; // [#protodoc-title: File access log] +// [#extension: envoy.access_loggers.file] // Custom configuration for an :ref:`AccessLog ` // that writes log entries directly to a file. 
Configures the built-in *envoy.file_access_log* diff --git a/api/envoy/config/accesslog/v3alpha/als.proto b/api/envoy/config/accesslog/v3alpha/als.proto index cdbb81741e1f..77589fffdae1 100644 --- a/api/envoy/config/accesslog/v3alpha/als.proto +++ b/api/envoy/config/accesslog/v3alpha/als.proto @@ -19,6 +19,7 @@ import "validate/validate.proto"; // :ref:`AccessLog `. This configuration // will populate :ref:`StreamAccessLogsMessage.http_logs // `. +// [#extension: envoy.access_loggers.http_grpc] message HttpGrpcAccessLogConfig { CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; @@ -37,6 +38,7 @@ message HttpGrpcAccessLogConfig { // Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will // populate *StreamAccessLogsMessage.tcp_logs*. +// [#extension: envoy.access_loggers.tcp_grpc] message TcpGrpcAccessLogConfig { CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/accesslog/v3alpha/file.proto b/api/envoy/config/accesslog/v3alpha/file.proto index d8b033735e79..e826cdcb697e 100644 --- a/api/envoy/config/accesslog/v3alpha/file.proto +++ b/api/envoy/config/accesslog/v3alpha/file.proto @@ -11,6 +11,7 @@ import "google/protobuf/struct.proto"; import "validate/validate.proto"; // [#protodoc-title: File access log] +// [#extension: envoy.access_loggers.file] // Custom configuration for an :ref:`AccessLog // ` that writes log entries directly to a diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto index 9e4626c23e89..ef87aeab5c65 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview // ` for more information. +// [#extension: envoy.clusters.dynamic_forward_proxy] message ClusterConfig { // The DNS cache configuration that the cluster will attach to. Note this configuration must // match that of associated :ref:`dynamic forward proxy HTTP filter configuration diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto index be96cbf83a81..609c3f81c098 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview // ` for more information. +// [#extension: envoy.clusters.dynamic_forward_proxy] message ClusterConfig { // The DNS cache configuration that the cluster will attach to. 
Note this configuration must // match that of associated :ref:`dynamic forward proxy HTTP filter configuration diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index d3a2b3338b0e..beef21970085 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -48,6 +48,7 @@ import "validate/validate.proto"; // cluster_refresh_timeout: 0.5s // redirect_refresh_interval: 10s // redirect_refresh_threshold: 10 +// [#extension: envoy.clusters.redis] message RedisClusterConfig { // Interval between successive topology refresh requests. If not set, this defaults to 5s. diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto index 45d0401fe419..684e6c8499f9 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto +++ b/api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview // `. +// [#extension: envoy.filters.http.adaptive_concurrency] // Configuration parameters for the gradient controller. message GradientControllerConfig { diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto index 6262a94d5500..a6bec7933262 100644 --- a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview // `. +// [#extension: envoy.filters.http.adaptive_concurrency] // Configuration parameters for the gradient controller. message GradientControllerConfig { diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto index 44062f248199..f342c72d5b01 100644 --- a/api/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: Buffer] // Buffer :ref:`configuration overview `. +// [#extension: envoy.filters.http.buffer] message Buffer { reserved 2; diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto index faf882e896f4..acc7718b1e1b 100644 --- a/api/envoy/config/filter/http/csrf/v2/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto @@ -13,6 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. +// [#extension: envoy.filters.http.csrf] // CSRF filter config. message CsrfPolicy { diff --git a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto index 32dbe48ba496..5d3c3eca6f2e 100644 --- a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto @@ -13,6 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.csrf] // CSRF filter config. message CsrfPolicy { diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index dbe548c346b5..d66fa81f8eab 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview // ` for more information. +// [#extension: envoy.filters.http.dynamic_forward_proxy] message FilterConfig { // The DNS cache configuration that the filter will attach to. Note this configuration must // match that of associated :ref:`dynamic forward proxy cluster configuration diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto index d63094f38832..62161c25f1d4 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview // ` for more information. +// [#extension: envoy.filters.http.dynamic_forward_proxy] message FilterConfig { // The DNS cache configuration that the filter will attach to. Note this configuration must // match that of associated :ref:`dynamic forward proxy cluster configuration diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index 0f70acfc076e..8e3eb7b4c2da 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -16,6 +16,7 @@ import "validate/validate.proto"; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. +// [#extension: envoy.filters.http.ext_authz] // [#next-free-field: 11] message ExtAuthz { diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto index c884f6ebe852..22ccf8b27739 100644 --- a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto @@ -16,6 +16,7 @@ import "validate/validate.proto"; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. +// [#extension: envoy.filters.http.ext_authz] // [#next-free-field: 11] message ExtAuthz { diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index 87c9b976355d..15103bfc15a9 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -16,6 +16,7 @@ import "validate/validate.proto"; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.fault] message FaultAbort { reserved 1; diff --git a/api/envoy/config/filter/http/fault/v3alpha/fault.proto b/api/envoy/config/filter/http/fault/v3alpha/fault.proto index 83719bbfbbc1..c85dfd495c1c 100644 --- a/api/envoy/config/filter/http/fault/v3alpha/fault.proto +++ b/api/envoy/config/filter/http/fault/v3alpha/fault.proto @@ -16,6 +16,7 @@ import "validate/validate.proto"; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. +// [#extension: envoy.filters.http.fault] message FaultAbort { reserved 1; diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto index 1a909b45ff6c..2e533e9f2533 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto @@ -11,6 +11,7 @@ import "validate/validate.proto"; // [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] // gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview // `. +// [#extension: envoy.filters.http.grpc_http1_reverse_bridge] // gRPC reverse bridge filter configuration message FilterConfig { diff --git a/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto b/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto index 27d1c2aab7cf..20f856881827 100644 --- a/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto +++ b/api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto @@ -10,6 +10,7 @@ import "validate/validate.proto"; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. +// [#extension: envoy.filters.http.grpc_stats] // gRPC statistics filter configuration message FilterConfig { diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto index 57935e8d9857..dd78214f4e26 100644 --- a/api/envoy/config/filter/http/gzip/v2/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: Gzip] // Gzip :ref:`configuration overview `. +// [#extension: envoy.filters.http.gzip] // [#next-free-field: 10] message Gzip { diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto index ec323900cabc..d59ee83e6a23 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // for matching load balancer subsets, logging, etc. // // Header to Metadata :ref:`configuration overview `. +// [#extension: envoy.filters.http.header_to_metadata] message Config { enum ValueType { diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index b4c0dcb1b47b..055920ff3c15 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -16,6 +16,7 @@ import "validate/validate.proto"; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.health_check] // [#next-free-field: 6] message HealthCheck { diff --git a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto index 6b3169d18011..680a3fc89c9a 100644 --- a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto @@ -16,6 +16,7 @@ import "validate/validate.proto"; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. +// [#extension: envoy.filters.http.health_check] // [#next-free-field: 6] message HealthCheck { diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index ac088d80eaac..8e22c906b228 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. +// [#extension: envoy.filters.http.ip_tagging] message IPTagging { // The type of requests the filter should apply to. The supported types diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto index b076b6080227..81ff557bd380 100644 --- a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. +// [#extension: envoy.filters.http.ip_tagging] message IPTagging { // The type of requests the filter should apply to. The supported types diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index 998e0696278e..2d5f656ecd9c 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -17,6 +17,7 @@ import "validate/validate.proto"; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. +// [#extension: envoy.filters.http.jwt_authn] // Please see following for JWT authentication flow: // diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto index 82759233127d..bdabd330bbf3 100644 --- a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto @@ -17,6 +17,7 @@ import "validate/validate.proto"; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. +// [#extension: envoy.filters.http.jwt_authn] // Please see following for JWT authentication flow: // diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto index dae34551a0ac..1bf16f3c86c7 100644 --- a/api/envoy/config/filter/http/lua/v2/lua.proto +++ b/api/envoy/config/filter/http/lua/v2/lua.proto @@ -10,6 +10,7 @@ import "validate/validate.proto"; // [#protodoc-title: Lua] // Lua :ref:`configuration overview `. +// [#extension: envoy.filters.http.lua] message Lua { // The Lua code that Envoy will execute. 
This can be a very small script that diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto index b5927ed0f75a..ad0082694a38 100644 --- a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // The Original Src filter binds upstream connections to the original source address determined // for the request. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. +// [#extension: envoy.filters.http.original_src] message OriginalSrc { // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index 6013f47bf8ce..0b0e1edfd5fa 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.http.ratelimit] // [#next-free-field: 8] message RateLimit { diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto index ec8dad13e768..7380d261cf87 100644 --- a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.http.ratelimit] // [#next-free-field: 8] message RateLimit { diff --git a/api/envoy/config/filter/http/rbac/v2/rbac.proto b/api/envoy/config/filter/http/rbac/v2/rbac.proto index 4bdd8c5f2c9c..c497aa2fa645 100644 --- a/api/envoy/config/filter/http/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v2/rbac.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.http.rbac] // RBAC filter config. message RBAC { diff --git a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto index 4dc9dab5c1a1..994eec6e849c 100644 --- a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.http.rbac] // RBAC filter config. message RBAC { diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto index fb29bbef0c5c..d8a329be25db 100644 --- a/api/envoy/config/filter/http/router/v2/router.proto +++ b/api/envoy/config/filter/http/router/v2/router.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Router] // Router :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.router] // [#next-free-field: 7] message Router { diff --git a/api/envoy/config/filter/http/router/v3alpha/router.proto b/api/envoy/config/filter/http/router/v3alpha/router.proto index 4cc0eae80634..59c541ccb4d9 100644 --- a/api/envoy/config/filter/http/router/v3alpha/router.proto +++ b/api/envoy/config/filter/http/router/v3alpha/router.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Router] // Router :ref:`configuration overview `. +// [#extension: envoy.filters.http.router] // [#next-free-field: 7] message Router { diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto index 49e7538596a7..8d4f352ef570 100644 --- a/api/envoy/config/filter/http/squash/v2/squash.proto +++ b/api/envoy/config/filter/http/squash/v2/squash.proto @@ -13,6 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: Squash] // Squash :ref:`configuration overview `. +// [#extension: envoy.filters.http.squash] // [#next-free-field: 6] message Squash { diff --git a/api/envoy/config/filter/http/tap/v2alpha/tap.proto b/api/envoy/config/filter/http/tap/v2alpha/tap.proto index ee9027055ab9..4dc3b33bb31a 100644 --- a/api/envoy/config/filter/http/tap/v2alpha/tap.proto +++ b/api/envoy/config/filter/http/tap/v2alpha/tap.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. +// [#extension: envoy.filters.http.tap] // Top level configuration for the tap filter. message Tap { diff --git a/api/envoy/config/filter/http/tap/v3alpha/tap.proto b/api/envoy/config/filter/http/tap/v3alpha/tap.proto index f3ec07e10438..064307cba0c5 100644 --- a/api/envoy/config/filter/http/tap/v3alpha/tap.proto +++ b/api/envoy/config/filter/http/tap/v3alpha/tap.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. +// [#extension: envoy.filters.http.tap] // Top level configuration for the tap filter. message Tap { diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto index b56232250979..af5ea5a18db4 100644 --- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -10,6 +10,7 @@ import "validate/validate.proto"; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder :ref:`configuration overview `. +// [#extension: envoy.filters.http.grpc_json_transcoder] // [#next-free-field: 10] message GrpcJsonTranscoder { diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto index a9d8fb939a91..f6aa6b5a0e3f 100644 --- a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto @@ -10,6 +10,7 @@ import "validate/validate.proto"; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. +// [#extension: envoy.filters.listener.original_src] // The Original Src filter binds upstream connections to the original source address determined // for the connection. 
This address could come from something like the Proxy Protocol filter, or it diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index 0a51432321c7..d9819c9e8ca8 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Client TLS authentication] // Client TLS authentication // :ref:`configuration overview `. +// [#extension: envoy.filters.network.client_ssl_auth] message ClientSSLAuth { // The :ref:`cluster manager ` cluster that runs diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto index 6cf616d96f67..6bfe225a1496 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Client TLS authentication] // Client TLS authentication // :ref:`configuration overview `. +// [#extension: envoy.filters.network.client_ssl_auth] message ClientSSLAuth { // The :ref:`cluster manager ` cluster that runs diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index 2076d781612b..8a5ede0b6703 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.dubbo_proxy] // Dubbo Protocol types supported by Envoy. enum ProtocolType { diff --git a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto index f55917baf2d6..ccb885d33006 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.dubbo_proxy] // Dubbo Protocol types supported by Envoy. enum ProtocolType { diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index 9b8e2b7a7a02..795607fcf226 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -13,6 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration // :ref:`configuration overview `. 
+// [#extension: envoy.filters.network.ext_authz] // External Authorization filter calls out to an external service over the // gRPC Authorization API defined by diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto index 97c6e4d45075..373034886c07 100644 --- a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto @@ -13,6 +13,7 @@ import "validate/validate.proto"; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration // :ref:`configuration overview `. +// [#extension: envoy.filters.network.ext_authz] // External Authorization filter calls out to an external service over the // gRPC Authorization API defined by diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 19f3fe3dd37d..5a826725e07f 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -22,6 +22,7 @@ import "validate/validate.proto"; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. +// [#extension: envoy.filters.network.http_connection_manager] // [#next-free-field: 36] message HttpConnectionManager { diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto index f96b590d7130..0cdef53dc548 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto @@ -22,6 +22,7 @@ import "validate/validate.proto"; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. +// [#extension: envoy.filters.network.http_connection_manager] // [#next-free-field: 36] message HttpConnectionManager { diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto index 724c8a3b4c40..aee2936e99a2 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. +// [#extension: envoy.filters.network.mongo_proxy] message MongoProxy { // The human readable prefix to use when emitting :ref:`statistics diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto index 9ca7a7a3a5e6..cee198dbc12e 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. 
+// [#extension: envoy.filters.network.mongo_proxy] message MongoProxy { // The human readable prefix to use when emitting :ref:`statistics diff --git a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto index dee014556360..4665bbfa7ba0 100644 --- a/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto +++ b/api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto @@ -10,10 +10,11 @@ import "validate/validate.proto"; // [#protodoc-title: MySQL proxy] // MySQL Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.mysql_proxy] message MySQLProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. // If the access log field is empty, access logs will not be written. diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index 4e3f5fc2d289..d65797ea5126 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.network.ratelimit] // [#next-free-field: 7] message RateLimit { diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto index f8d65b687edb..d16715013198 100644 --- a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.network.ratelimit] // [#next-free-field: 7] message RateLimit { diff --git a/api/envoy/config/filter/network/rbac/v2/rbac.proto b/api/envoy/config/filter/network/rbac/v2/rbac.proto index ea24eb50f431..4d6cb00c62d7 100644 --- a/api/envoy/config/filter/network/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v2/rbac.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.network.rbac] // RBAC network filter config. // diff --git a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto index b8ec5828baec..febc45062f90 100644 --- a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto @@ -12,6 +12,7 @@ import "validate/validate.proto"; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. +// [#extension: envoy.filters.network.rbac] // RBAC network filter config. 
// diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 3b718c4f3416..da1acdb75c58 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.redis_proxy] // [#next-free-field: 7] message RedisProxy { diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto index a553f830081a..9c04dbacdc3f 100644 --- a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.redis_proxy] // [#next-free-field: 7] message RedisProxy { diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 373d072bdbb8..c1357e90b344 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.tcp_proxy] // [#next-free-field: 12] message TcpProxy { diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto index 9ba8419dbd14..a36676bf92b2 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.tcp_proxy] // [#next-free-field: 12] message TcpProxy { diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index b0e25c2407cf..a857592373c4 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.thrift_proxy] // Thrift transport types supported by Envoy. enum TransportType { diff --git a/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto index dffacb51e2da..9c5f5d3966b8 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.thrift_proxy] // Thrift transport types supported by Envoy. 
enum TransportType { diff --git a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto index 72d09810ff0f..cfe2a1075d86 100644 --- a/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto +++ b/api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto @@ -6,15 +6,17 @@ option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; -import "validate/validate.proto"; import "google/protobuf/wrappers.proto"; +import "validate/validate.proto"; + // [#protodoc-title: ZooKeeper proxy] // ZooKeeper Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.zookeeper_proxy] message ZooKeeperProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. // If the access log field is empty, access logs will not be written. diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index 492af6cb6374..bf3cb83dcae2 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.thrift.ratelimit] // [#next-free-field: 6] message RateLimit { diff --git a/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto index 0d43762ebf74..0365f343fa84 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto @@ -14,6 +14,7 @@ import "validate/validate.proto"; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. +// [#extension: envoy.filters.thrift.ratelimit] // [#next-free-field: 6] message RateLimit { diff --git a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto index 9c9383caf33f..e5dd085569bc 100644 --- a/api/envoy/config/filter/thrift/router/v2alpha1/router.proto +++ b/api/envoy/config/filter/thrift/router/v2alpha1/router.proto @@ -8,6 +8,7 @@ option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1" // [#protodoc-title: Router] // Thrift router :ref:`configuration overview `. 
+// [#extension: envoy.filters.thrift.router] message Router { } diff --git a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto index 154572901a7c..353843f28c8c 100644 --- a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto @@ -10,6 +10,7 @@ import "validate/validate.proto"; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin +// [#extension: envoy.grpc_credentials.aws_iam] message AwsIamConfig { // The `service namespace diff --git a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto index cd9f27d71e45..3b814ee850e9 100644 --- a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto @@ -10,6 +10,7 @@ import "envoy/api/v2/core/base.proto"; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin +// [#extension: envoy.grpc_credentials.file_based_metadata] message FileBasedMetadataConfig { // Location or inline data of secret to use for authentication of the Google gRPC connection diff --git a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto index fe100f9ded91..84c60b6f7adc 100644 --- a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto @@ -10,6 +10,7 @@ import "validate/validate.proto"; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin +// [#extension: envoy.grpc_credentials.aws_iam] message AwsIamConfig { // The `service namespace diff --git a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto index dc8156a57b88..27cc077fd9b7 100644 --- a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto @@ -10,6 +10,7 @@ import "envoy/api/v3alpha/core/base.proto"; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin +// [#extension: envoy.grpc_credentials.file_based_metadata] message FileBasedMetadataConfig { // Location or inline data of secret to use for authentication of the Google gRPC connection diff --git a/api/envoy/config/health_checker/redis/v2/redis.proto b/api/envoy/config/health_checker/redis/v2/redis.proto index 8ab2de269a5f..5df588427daa 100644 --- a/api/envoy/config/health_checker/redis/v2/redis.proto +++ b/api/envoy/config/health_checker/redis/v2/redis.proto @@ -8,6 +8,7 @@ option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2"; // [#protodoc-title: Redis] // Redis health checker :ref:`configuration overview `. +// [#extension: envoy.health_checkers.redis] message Redis { // If set, optionally perform ``EXISTS `` instead of ``PING``. 
A return value diff --git a/api/envoy/config/metrics/v2/metrics_service.proto b/api/envoy/config/metrics/v2/metrics_service.proto index da53e5a52fdc..5fd045e47135 100644 --- a/api/envoy/config/metrics/v2/metrics_service.proto +++ b/api/envoy/config/metrics/v2/metrics_service.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. +// [#extension: envoy.stat_sinks.metrics_service] message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index 79f2ce81f3ce..b46301b2783a 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -240,6 +240,7 @@ message TagSpecifier { // Stats configuration proto schema for built-in *envoy.statsd* sink. This sink does not support // tagged metrics. +// [#extension: envoy.stat_sinks.statsd] message StatsdSink { oneof statsd_specifier { option (validate.required) = true; @@ -288,6 +289,7 @@ message StatsdSink { // The sink emits stats with `DogStatsD `_ // compatible tags. Tags are configurable via :ref:`StatsConfig // `. +// [#extension: envoy.stat_sinks.dog_statsd] message DogStatsdSink { reserved 2; @@ -313,6 +315,7 @@ message DogStatsdSink { // Note that only a single HystrixSink should be configured. // // Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +// [#extension: envoy.stat_sinks.hystrix] message HystrixSink { // The number of buckets the rolling statistical window is divided into. // diff --git a/api/envoy/config/metrics/v3alpha/metrics_service.proto b/api/envoy/config/metrics/v3alpha/metrics_service.proto index 83124c081771..2a3fbfb85e9c 100644 --- a/api/envoy/config/metrics/v3alpha/metrics_service.proto +++ b/api/envoy/config/metrics/v3alpha/metrics_service.proto @@ -15,6 +15,7 @@ import "validate/validate.proto"; // Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to // create Metrics Service. +// [#extension: envoy.stat_sinks.metrics_service] message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/config/metrics/v3alpha/stats.proto b/api/envoy/config/metrics/v3alpha/stats.proto index f9905f209eab..12e15390e772 100644 --- a/api/envoy/config/metrics/v3alpha/stats.proto +++ b/api/envoy/config/metrics/v3alpha/stats.proto @@ -243,6 +243,7 @@ message TagSpecifier { // Stats configuration proto schema for built-in *envoy.statsd* sink. This sink does not support // tagged metrics. +// [#extension: envoy.stat_sinks.statsd] message StatsdSink { oneof statsd_specifier { option (validate.required) = true; @@ -291,6 +292,7 @@ message StatsdSink { // The sink emits stats with `DogStatsD `_ // compatible tags. Tags are configurable via :ref:`StatsConfig // `. +// [#extension: envoy.stat_sinks.dog_statsd] message DogStatsdSink { reserved 2; @@ -316,6 +318,7 @@ message DogStatsdSink { // Note that only a single HystrixSink should be configured. // // Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. 
+// [#extension: envoy.stat_sinks.hystrix] message HystrixSink { // The number of buckets the rolling statistical window is divided into. // diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto index ad3d1ab1eada..2f9b035f574b 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2 import "validate/validate.proto"; // [#protodoc-title: Fixed heap] +// [#extension: envoy.resource_monitors.fixed_heap] // The fixed heap resource monitor reports the Envoy process memory pressure, computed as a // fraction of currently reserved heap memory divided by a statically configured maximum diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto index 2a8a1a43150c..e31c6f5f328b 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_reso import "validate/validate.proto"; // [#protodoc-title: Injected resource] +// [#extension: envoy.resource_monitors.injected_resource] // The injected resource monitor allows injecting a synthetic resource pressure into Envoy // via a text file, which must contain a floating-point number in the range [0..1] representing diff --git a/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto b/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto index 9ee2ed552687..ee92a2e49f05 100644 --- a/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto +++ b/api/envoy/config/retry/previous_priorities/previous_priorities_config.proto @@ -30,6 +30,7 @@ option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities"; // // Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of // priorities), which might incur significant overhead for clusters with many priorities. +// [#extension: envoy.retry_priorities.previous_priorities] message PreviousPrioritiesConfig { // How often the priority load should be updated based on previously attempted priorities. Useful // to allow each priorities to receive more than one request before being excluded or to reduce diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 0825e697b76f..c9eec1db7ec4 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -60,6 +60,7 @@ message Tracing { } // Configuration for the LightStep tracer. +// [#extension: envoy.tracers.lightstep] message LightstepConfig { // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -70,6 +71,7 @@ message LightstepConfig { } // Configuration for the Zipkin tracer. +// [#extension: envoy.tracers.zipkin] // [#next-free-field: 6] message ZipkinConfig { // Available Zipkin collector endpoint versions. 
@@ -119,6 +121,7 @@ message ZipkinConfig { // DynamicOtConfig is used to dynamically load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. +// [#extension: envoy.tracers.dynamic_ot] message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. @@ -130,6 +133,7 @@ message DynamicOtConfig { } // Configuration for the Datadog tracer. +// [#extension: envoy.tracers.datadog] message DatadogConfig { // The cluster to use for submitting traces to the Datadog agent. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -140,6 +144,7 @@ message DatadogConfig { // Configuration for the OpenCensus tracer. // [#next-free-field: 13] +// [#extension: envoy.tracers.opencensus] message OpenCensusConfig { enum TraceContext { // No-op default, no trace context is utilized. diff --git a/api/envoy/config/trace/v3alpha/trace.proto b/api/envoy/config/trace/v3alpha/trace.proto index 0311e04c3b3a..f586d15b83e2 100644 --- a/api/envoy/config/trace/v3alpha/trace.proto +++ b/api/envoy/config/trace/v3alpha/trace.proto @@ -62,6 +62,7 @@ message Tracing { } // Configuration for the LightStep tracer. +// [#extension: envoy.tracers.lightstep] message LightstepConfig { // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -72,6 +73,7 @@ message LightstepConfig { } // Configuration for the Zipkin tracer. +// [#extension: envoy.tracers.zipkin] // [#next-free-field: 6] message ZipkinConfig { // Available Zipkin collector endpoint versions. @@ -121,6 +123,7 @@ message ZipkinConfig { // DynamicOtConfig is used to dynamically load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. +// [#extension: envoy.tracers.dynamic_ot] message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. @@ -132,6 +135,7 @@ message DynamicOtConfig { } // Configuration for the Datadog tracer. +// [#extension: envoy.tracers.datadog] message DatadogConfig { // The cluster to use for submitting traces to the Datadog agent. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -142,6 +146,7 @@ message DatadogConfig { // Configuration for the OpenCensus tracer. // [#next-free-field: 13] +// [#extension: envoy.tracers.opencensus] message OpenCensusConfig { enum TraceContext { // No-op default, no trace context is utilized. diff --git a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto index e19f4520359f..668facfc61dc 100644 --- a/api/envoy/config/transport_socket/alts/v2alpha/alts.proto +++ b/api/envoy/config/transport_socket/alts/v2alpha/alts.proto @@ -9,6 +9,7 @@ option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha" import "validate/validate.proto"; // [#protodoc-title: ALTS] +// [#extension: envoy.transport_sockets.alts] // Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy. 
// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ diff --git a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto index ffb121fcb061..418116336572 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto @@ -12,6 +12,7 @@ import "envoy/config/common/tap/v2alpha/common.proto"; import "validate/validate.proto"; // [#protodoc-title: Tap] +// [#extension: envoy.transport_sockets.tap] // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. diff --git a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto index dc5c303d0214..6a6fd972bd4b 100644 --- a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto @@ -12,6 +12,7 @@ import "envoy/config/common/tap/v3alpha/common.proto"; import "validate/validate.proto"; // [#protodoc-title: Tap] +// [#extension: envoy.transport_sockets.tap] // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 884593961cfa..4ae055e71289 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -6,6 +6,7 @@ load(":envoy_internal.bzl", "envoy_external_dep_path") load( ":envoy_library.bzl", _envoy_basic_cc_library = "envoy_basic_cc_library", + _envoy_cc_extension = "envoy_cc_extension", _envoy_cc_library = "envoy_cc_library", _envoy_cc_posix_library = "envoy_cc_posix_library", _envoy_cc_win32_library = "envoy_cc_win32_library", @@ -171,6 +172,7 @@ envoy_cc_binary = _envoy_cc_binary # Library wrappers (from envoy_library.bzl) envoy_basic_cc_library = _envoy_basic_cc_library +envoy_cc_extension = _envoy_cc_extension envoy_cc_library = _envoy_cc_library envoy_cc_posix_library = _envoy_cc_posix_library envoy_cc_win32_library = _envoy_cc_win32_library diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 9aecc019a90f..b2fa86ed3c9f 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -29,6 +29,53 @@ def envoy_basic_cc_library(name, deps = [], external_deps = [], **kargs): **kargs ) +# All Envoy extensions must be tagged with their security hardening stance with +# respect to downstream and upstream data plane threats. These are verbose +# labels intended to make clear the trust that operators may place in +# extensions. +EXTENSION_SECURITY_POSTURES = [ + # This extension is hardened against untrusted downstream traffic. It + # assumes that the upstream is trusted. + "robust_to_untrusted_downstream", + # This extension is hardened against both untrusted downstream and upstream + # traffic. + "robust_to_untrusted_downstream_and_upstream", + # This extension is not hardened and should only be used in deployments + # where both the downstream and upstream are trusted. + "requires_trusted_downstream_and_upstream", + # This is functionally equivalent to + # requires_trusted_downstream_and_upstream, but acts as a placeholder to + # allow us to identify extensions that need classifying. + "unknown", + # Not relevant to data plane threats, e.g. stats sinks. 
+ "data_plane_agnostic", +] + +EXTENSION_STATUS_VALUES = [ + # This extension is stable and is expected to be production usable. + "stable", + # This extension is functional but has not had substantial production burn + # time, use only with this caveat. + "alpha", + # This extension is work-in-progress. Functionality is incomplete and it is + # not intended for production use. + "wip", +] + +def envoy_cc_extension( + name, + security_posture, + # Only set this for internal, undocumented extensions. + undocumented = False, + status = "stable", + tags = [], + **kwargs): + if security_posture not in EXTENSION_SECURITY_POSTURES: + fail("Unknown extension security posture: " + security_posture) + if status not in EXTENSION_STATUS_VALUES: + fail("Unknown extension status: " + status) + envoy_cc_library(name, tags = tags, **kwargs) + # Envoy C++ library targets should be specified with this function. def envoy_cc_library( name, diff --git a/docs/build.sh b/docs/build.sh index ce7efd17b59f..795d8d6e7c25 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -53,9 +53,26 @@ rm -rf bazel-bin/external/envoy_api # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. BAZEL_BUILD_OPTIONS+=" --remote_download_outputs=all" +export EXTENSION_DB_PATH="$(realpath "${BUILD_DIR}/extension_db.json")" + +# Generate extension database. This maps from extension name to extension +# metadata, based on the envoy_cc_extension() Bazel target attributes. +./docs/generate_extension_db.py "${EXTENSION_DB_PATH}" + +# Generate RST for the lists of trusted/untrusted extensions in +# intro/arch_overview/security docs. +mkdir -p "${GENERATED_RST_DIR}"/intro/arch_overview/security +./docs/generate_extension_rst.py "${EXTENSION_DB_PATH}" "${GENERATED_RST_DIR}"/intro/arch_overview/security + +# Generate the extensions docs bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ tools/protodoc/protodoc.bzl%protodoc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED=1 \ - --action_env=ENVOY_BLOB_SHA --host_force_python=PY3 + --action_env=ENVOY_BLOB_SHA --action_env=EXTENSION_DB_PATH="${EXTENSION_DB_PATH}" --host_force_python=PY3 + +# Fill in boiler plate for extensions that have google.protobuf.Empty as their +# config. +bazel run ${BAZEL_BUILD_OPTIONS} //tools/protodoc:generate_empty -- \ + "${PWD}"/docs/empty_extensions.json "${PWD}/${GENERATED_RST_DIR}"/api-v2/config # We do ** matching below to deal with Bazel cache blah (source proto artifacts # are nested inside source package targets). 
diff --git a/docs/empty_extensions.json b/docs/empty_extensions.json new file mode 100644 index 000000000000..b62671ca6d9d --- /dev/null +++ b/docs/empty_extensions.json @@ -0,0 +1,66 @@ +{ + "envoy.filters.http.cors": { + "title": "CORS processing", + "path": "filter/http/cors", + "description": "https://en.wikipedia.org/wiki/Cross-origin_resource_sharing", + "ref": "config_http_filters_cors" + }, + "envoy.filters.http.dynamo": { + "title": "AWS DynamoDB", + "path": "filter/http/dynamo", + "description": "https://aws.amazon.com/dynamodb/", + "ref": "config_http_filters_dynamo" + }, + "envoy.filters.http.grpc_http1_bridge": { + "title": "gRPC HTTP/1 bridge", + "path": "filter/http/grpc_http1_bridge", + "description": "HTTP filter that bridges HTTP/1.1 unary gRPC to compliant HTTP/2 gRPC", + "ref": "config_http_filters_grpc_bridge" + }, + "envoy.filters.http.grpc_web": { + "title": "gRPC Web", + "path": "filter/http/grpc_web", + "description": "https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md", + "ref": "config_http_filters_grpc_web" + }, + "envoy.filters.listener.http_inspector": { + "title": "HTTP Inspector", + "path": "filter/listener/http_inspector", + "ref": "config_listener_filters_http_inspector" + }, + "envoy.filters.listener.original_dst": { + "title": "Original Destination", + "path": "filter/listener/original_dst", + "ref": "config_listener_filters_original_dst" + }, + "envoy.filters.listener.proxy_protocol": { + "title": "Proxy Protocol", + "path": "filter/listener/proxy_protocol", + "ref": "config_listener_filters_proxy_protocol" + }, + "envoy.filters.listener.tls_inspector": { + "title": "TLS Inspector", + "path": "filter/listener/tls_inspector", + "ref": "config_listener_filters_tls_inspector" + }, + "envoy.filters.network.echo": { + "title": "Echo", + "path": "filter/network/echo", + "ref": "config_network_filters_echo" + }, + "envoy.filters.network.sni_cluster": { + "title": "SNI Cluster", + "path": "filter/network/sni_cluster", + "ref": "config_network_filters_sni_cluster" + }, + "envoy.retry_host_predicates.previous_hosts": { + "title": "Previous Hosts", + "path": "retry/previous_hosts", + "ref": "arch_overview_http_retry_plugins" + }, + "envoy.retry_host_predicates.omit_canary_hosts": { + "title": "Omit Canary Hosts", + "path": "retry/omit_canary_hosts", + "ref": "arch_overview_http_retry_plugins" + } +} diff --git a/docs/generate_extension_db.py b/docs/generate_extension_db.py new file mode 100755 index 000000000000..d021b75e48ab --- /dev/null +++ b/docs/generate_extension_db.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 + +# Generate an extension database, a JSON file mapping from qualified well known +# extension name to metadata derived from the envoy_cc_extension target. + +import json +import os +import pathlib +import shutil +import subprocess +import sys + +from importlib.util import spec_from_loader, module_from_spec +from importlib.machinery import SourceFileLoader + +BUILDOZER_PATH = os.getenv("BUILDOZER_BIN") or (os.path.expandvars("$GOPATH/bin/buildozer") if + os.getenv("GOPATH") else shutil.which("buildozer")) + +# source/extensions/extensions_build_config.bzl must have a .bzl suffix for Starlark +# import, so we are forced to do this workaround. 
+_extensions_build_config_spec = spec_from_loader( + 'extensions_build_config', + SourceFileLoader('extensions_build_config', 'source/extensions/extensions_build_config.bzl')) +extensions_build_config = module_from_spec(_extensions_build_config_spec) +_extensions_build_config_spec.loader.exec_module(extensions_build_config) + + +class ExtensionDbError(Exception): + pass + + +def IsMissing(value): + return value == '(missing)' + + +def GetExtensionMetadata(target): + r = subprocess.run( + [BUILDOZER_PATH, '-stdout', 'print security_posture status undocumented', target], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + security_posture, status, undocumented = r.stdout.decode('utf-8').strip().split(' ') + if IsMissing(security_posture): + raise ExtensionDbError('Missing security posture for %s' % target) + return { + 'security_posture': security_posture, + 'undocumented': False if IsMissing(undocumented) else bool(undocumented), + 'status': 'stable' if IsMissing(status) else status, + } + + +if __name__ == '__main__': + output_path = sys.argv[1] + extension_db = {} + for extension, target in extensions_build_config.EXTENSIONS.items(): + extension_db[extension] = GetExtensionMetadata(target) + + pathlib.Path(output_path).write_text(json.dumps(extension_db)) diff --git a/docs/generate_extension_rst.py b/docs/generate_extension_rst.py new file mode 100755 index 000000000000..ca98b0224019 --- /dev/null +++ b/docs/generate_extension_rst.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 + +# Generate RST lists of extensions grouped by their security posture. + +from collections import defaultdict +import json +import pathlib +import sys + + +def FormatItem(extension, metadata): + if metadata['undocumented']: + item = '* %s' % extension + else: + item = '* :ref:`%s <extension_%s>`' % (extension, extension) + if metadata['status'] == 'alpha': + item += ' (alpha)' + return item + + +if __name__ == '__main__': + extension_db_path = sys.argv[1] + security_rst_root = sys.argv[2] + + extension_db = json.loads(pathlib.Path(extension_db_path).read_text()) + security_postures = defaultdict(list) + for extension, metadata in extension_db.items(): + security_postures[metadata['security_posture']].append(extension) + + for sp, extensions in security_postures.items(): + output_path = pathlib.Path(security_rst_root, 'secpos_%s.rst' % sp) + content = '\n'.join( + FormatItem(extension, extension_db[extension]) + for extension in sorted(extensions) + if extension_db[extension]['status'] != 'wip') + output_path.write_text(content) diff --git a/docs/root/api-v2/config/config.rst b/docs/root/api-v2/config/config.rst index 8a1881ec7019..10015222fe8e 100644 --- a/docs/root/api-v2/config/config.rst +++ b/docs/root/api-v2/config/config.rst @@ -15,3 +15,4 @@ Extensions cluster/cluster listener/listener grpc_credential/grpc_credential + retry/retry diff --git a/docs/root/api-v2/config/filter/http/http.rst b/docs/root/api-v2/config/filter/http/http.rst index 0aff5791bf6a..7746b0d72a5c 100644 --- a/docs/root/api-v2/config/filter/http/http.rst +++ b/docs/root/api-v2/config/filter/http/http.rst @@ -5,6 +5,7 @@ HTTP filters :glob: :maxdepth: 2 + */empty/* */v2/* */v2alpha/* */v2alpha1/* diff --git a/docs/root/api-v2/config/filter/listener/listener.rst b/docs/root/api-v2/config/filter/listener/listener.rst index d66039048e7a..9230a2e18a81 100644 --- a/docs/root/api-v2/config/filter/listener/listener.rst +++ b/docs/root/api-v2/config/filter/listener/listener.rst @@ -5,4 +5,5 @@ Listener filters :glob: :maxdepth: 2 + */empty/* */v2alpha1/* diff
--git a/docs/root/api-v2/config/filter/network/network.rst b/docs/root/api-v2/config/filter/network/network.rst index cd12d279ff8b..706f81eccf9d 100644 --- a/docs/root/api-v2/config/filter/network/network.rst +++ b/docs/root/api-v2/config/filter/network/network.rst @@ -5,5 +5,7 @@ Network filters :glob: :maxdepth: 2 + */empty/* + */v1alpha1/* */v2/* */v2alpha1/* diff --git a/docs/root/api-v2/config/retry/retry.rst b/docs/root/api-v2/config/retry/retry.rst new file mode 100644 index 000000000000..b3f814b2a4e5 --- /dev/null +++ b/docs/root/api-v2/config/retry/retry.rst @@ -0,0 +1,9 @@ +Retry Predicates +================ + +.. toctree:: + :glob: + :maxdepth: 2 + + */empty/* + */* diff --git a/docs/root/intro/arch_overview/security/security.rst b/docs/root/intro/arch_overview/security/security.rst index 065935c6f342..16409d759de1 100644 --- a/docs/root/intro/arch_overview/security/security.rst +++ b/docs/root/intro/arch_overview/security/security.rst @@ -4,6 +4,7 @@ Security .. toctree:: :maxdepth: 2 + threat_model ssl jwt_authn_filter ext_authz_filter diff --git a/docs/root/intro/arch_overview/security/threat_model.rst b/docs/root/intro/arch_overview/security/threat_model.rst new file mode 100644 index 000000000000..765d1ba0d3a7 --- /dev/null +++ b/docs/root/intro/arch_overview/security/threat_model.rst @@ -0,0 +1,97 @@ +.. _arch_overview_threat_model: + +Threat model +============ + +Below we articulate the Envoy threat model, which is of relevance to Envoy operators, developers and +security researchers. We detail our security release process at +https://github.com/envoyproxy/envoy/security/policy. + +Confidentiality, integrity and availability +------------------------------------------- + +We consider vulnerabilities leading to the compromise of data confidentiality or integrity to be our +highest priority concerns. Availability, in particular in areas relating to DoS and resource +exhaustion, is also a serious security concern for Envoy operators, in particular those utilizing +Envoy in edge deployments. + +The Envoy availability stance around CPU and memory DoS, as well as Query-of-Death (QoD), is still +evolving. We will continue to iterate and fix well known resource issues in the open, e.g. overload +manager and watermark improvements. We will activate the security process for disclosures that +appear to present a risk profile that is significantly greater than the current Envoy availability +hardening status quo. Examples of disclosures that would elicit this response: + +* QoD; where a single query from a client can bring down an Envoy server. + +* Highly asymmetric resource exhaustion attacks, where very little traffic can cause resource exhaustion, + e.g. that delivered by a single client. + +Note that we do not currently consider the default settings for Envoy to be safe from an availability +perspective. It is necessary for operators to explicitly :ref:`configure ` +watermarks, the overload manager, circuit breakers and other resource related features in Envoy to +provide a robust availability story. We will not act on any security disclosure that relates to a +lack of safe defaults. Over time, we will work towards improved safe-by-default configuration, but +due to backwards compatibility and performance concerns, this will require following the breaking +change deprecation policy. 
+ +Data and control plane +---------------------- + +We divide our threat model into data and control plane, reflecting the internal division in Envoy of +these concepts from an architectural perspective. Our highest priority in risk assessment is the +threat posed by untrusted downstream client traffic on the data plane. This reflects the use of +Envoy in an edge serving capacity and also the use of Envoy as an inbound destination in a service +mesh deployment. + +In addition, we have an evolving position towards any vulnerability that might be exploitable by +untrusted upstreams. We recognize that these constitute a serious security consideration, given the +use of Envoy as an egress proxy. We will activate the security release process for disclosures that +appear to present a risk profile that is significantly greater than the current Envoy upstream +hardening status quo. + +The control plane management server is generally trusted. We do not consider wire-level exploits +against the xDS transport protocol to be a concern as a result. However, the configuration delivered +to Envoy over xDS may originate from untrusted sources and may not be fully sanitized. An example of +this might be a service operator that hosts multiple tenants on an Envoy, where tenants may specify +a regular expression on a header match in `RouteConfiguration`. In this case, we expect that Envoy +is resilient against the risks posed by malicious configuration from a confidentiality, integrity +and availability perspective, as described above. + +We generally assume that services utilized for side calls during the request processing, e.g. +external authorization, credential suppliers, rate limit services, are trusted. When this is not the +case, an extension will explicitly state this in its documentation. + +Core and extensions +------------------- + +Anything in the Envoy core may be used in both untrusted and trusted deployments. As a consequence, +it should be hardened with this model in mind. Security issues related to core code will usually +trigger the security release process as described in this document. + +The following extensions are intended to be hardened against untrusted downstream and upstreams: + +.. include:: secpos_robust_to_untrusted_downstream_and_upstream.rst + +The following extensions should not be exposed to data plane attack vectors and hence are intended +to be robust to untrusted downstreams and upstreams: + +.. include:: secpos_data_plane_agnostic.rst + +The following extensions are intended to be hardened against untrusted downstreams but assume trusted +upstreams: + +.. include:: secpos_robust_to_untrusted_downstream.rst + +The following extensions should only be used when both the downstream and upstream are trusted: + +.. include:: secpos_requires_trusted_downstream_and_upstream.rst + + +The following extensions have an unknown security posture: + +.. include:: secpos_unknown.rst + +Envoy currently has two dynamic filter extensions that support loadable code; WASM and Lua. In both +cases, we assume that the dynamically loaded code is trusted. We expect the runtime for Lua to be +robust to untrusted data plane traffic with the assumption of a trusted script. WASM is still in +development, but will eventually have a similar security stance. 
diff --git a/source/extensions/access_loggers/file/BUILD b/source/extensions/access_loggers/file/BUILD index 7a935f7223ee..5901b7a5ce9a 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -20,10 +21,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":file_access_log_lib", "//include/envoy/registry", diff --git a/source/extensions/access_loggers/grpc/BUILD b/source/extensions/access_loggers/grpc/BUILD index 93c36f6c7c4e..3fa3e01b0e94 100644 --- a/source/extensions/access_loggers/grpc/BUILD +++ b/source/extensions/access_loggers/grpc/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -93,10 +94,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "http_config", srcs = ["http_config.cc"], hdrs = ["http_config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":config_utils", "//include/envoy/server:access_log_config_interface", @@ -110,10 +112,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "tcp_config", srcs = ["tcp_config.cc"], hdrs = ["tcp_config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":config_utils", "//include/envoy/server:access_log_config_interface", diff --git a/source/extensions/clusters/dynamic_forward_proxy/BUILD b/source/extensions/clusters/dynamic_forward_proxy/BUILD index feaa5cd8bb31..2aa9d4087987 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/source/extensions/clusters/dynamic_forward_proxy/BUILD @@ -2,16 +2,17 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "cluster", srcs = ["cluster.cc"], hdrs = ["cluster.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//source/common/network:transport_socket_options_lib", "//source/common/upstream:cluster_factory_lib", diff --git a/source/extensions/clusters/redis/BUILD b/source/extensions/clusters/redis/BUILD index d548fd659f0c..0eb379fa9d54 100644 --- a/source/extensions/clusters/redis/BUILD +++ b/source/extensions/clusters/redis/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -36,12 +37,13 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "redis_cluster", srcs = [ "redis_cluster.cc", "redis_cluster.h", ], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ "redis_cluster_lb", "//include/envoy/api:api_interface", diff --git a/source/extensions/common/crypto/BUILD b/source/extensions/common/crypto/BUILD index a96daeaa193e..ec962c1ef07b 100644 --- a/source/extensions/common/crypto/BUILD +++ b/source/extensions/common/crypto/BUILD @@ -2,13 +2,13 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "utility_lib", srcs = [ "crypto_impl.cc", @@ -21,6 +21,8 @@ envoy_cc_library( external_deps = 
[ "ssl", ], + security_posture = "unknown", + undocumented = True, deps = [ "//include/envoy/buffer:buffer_interface", "//source/common/common:assert_lib", diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index f84620b5488f..a4445f4cfef7 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -80,8 +80,7 @@ EXTENSIONS = { "envoy.filters.network.echo": "//source/extensions/filters/network/echo:config", "envoy.filters.network.ext_authz": "//source/extensions/filters/network/ext_authz:config", "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", - # NOTE: Kafka filter does not have a proper filter implemented right now. We are referencing to - # codec implementation that is going to be used by the filter. + # WiP "envoy.filters.network.kafka": "//source/extensions/filters/network/kafka:kafka_request_codec_lib", "envoy.filters.network.mongo_proxy": "//source/extensions/filters/network/mongo_proxy:config", "envoy.filters.network.mysql_proxy": "//source/extensions/filters/network/mysql_proxy:config", @@ -125,6 +124,7 @@ EXTENSIONS = { "envoy.tracers.datadog": "//source/extensions/tracers/datadog:config", "envoy.tracers.zipkin": "//source/extensions/tracers/zipkin:config", "envoy.tracers.opencensus": "//source/extensions/tracers/opencensus:config", + # WiP "envoy.tracers.xray": "//source/extensions/tracers/xray:config", # @@ -137,7 +137,7 @@ EXTENSIONS = { # Retry host predicates "envoy.retry_host_predicates.previous_hosts": "//source/extensions/retry/host/previous_hosts:config", "envoy.retry_host_predicates.omit_canary_hosts": "//source/extensions/retry/host/omit_canary_hosts:config", - + # Retry priorities "envoy.retry_priorities.previous_priorities": "//source/extensions/retry/priority/previous_priorities:config", } diff --git a/source/extensions/filters/http/adaptive_concurrency/BUILD b/source/extensions/filters/http/adaptive_concurrency/BUILD index 4a142b62114c..c74ead9fdf5b 100644 --- a/source/extensions/filters/http/adaptive_concurrency/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -25,10 +26,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/buffer/BUILD b/source/extensions/filters/http/buffer/BUILD index e1a544cc3e83..d4df627dbb86 100644 --- a/source/extensions/filters/http/buffer/BUILD +++ b/source/extensions/filters/http/buffer/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -32,10 +33,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/cors/BUILD b/source/extensions/filters/http/cors/BUILD index 6ddf34f61e08..d28f5e2c858e 100644 --- a/source/extensions/filters/http/cors/BUILD +++ 
b/source/extensions/filters/http/cors/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -26,10 +27,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD index 2646361535db..55dcbe082de7 100644 --- a/source/extensions/filters/http/csrf/BUILD +++ b/source/extensions/filters/http/csrf/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -28,10 +29,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index 5c20c4ee8a7a..56dcddee2b03 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -21,10 +22,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + status = "alpha", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/dynamo/BUILD b/source/extensions/filters/http/dynamo/BUILD index 048be9323395..90e994cd22c2 100644 --- a/source/extensions/filters/http/dynamo/BUILD +++ b/source/extensions/filters/http/dynamo/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -37,10 +38,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ ":dynamo_filter_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/http/ext_authz/BUILD b/source/extensions/filters/http/ext_authz/BUILD index 71fe3a3dca54..703405a0e635 100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -34,10 +35,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":ext_authz", "//include/envoy/registry", diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index d1d869321b74..804d38ea6f54 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( 
"//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -39,10 +40,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/grpc_http1_bridge/BUILD b/source/extensions/filters/http/grpc_http1_bridge/BUILD index 6b7c57395df9..a01dd47cb585 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_bridge/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -28,10 +29,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index c17926c37d3b..72e6f64e8120 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -26,10 +27,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", deps = [ ":filter_lib", "//include/envoy/http:filter_interface", diff --git a/source/extensions/filters/http/grpc_json_transcoder/BUILD b/source/extensions/filters/http/grpc_json_transcoder/BUILD index 92cb1fe6e110..8a03c9f10de5 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/source/extensions/filters/http/grpc_json_transcoder/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -40,10 +41,11 @@ envoy_cc_library( deps = ["//source/common/buffer:zero_copy_input_stream_lib"], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/grpc_stats/BUILD b/source/extensions/filters/http/grpc_stats/BUILD index 20cf3462b7bd..fe5e0f8bf17c 100644 --- a/source/extensions/filters/http/grpc_stats/BUILD +++ b/source/extensions/filters/http/grpc_stats/BUILD @@ -4,16 +4,18 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["grpc_stats_filter.cc"], hdrs = ["grpc_stats_filter.h"], + security_posture = "unknown", + status = "alpha", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/grpc_web/BUILD b/source/extensions/filters/http/grpc_web/BUILD index 432e2bac3b5d..865b18763ce3 100644 --- a/source/extensions/filters/http/grpc_web/BUILD +++ b/source/extensions/filters/http/grpc_web/BUILD @@ -5,6 +5,7 @@ 
licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -27,10 +28,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/gzip/BUILD b/source/extensions/filters/http/gzip/BUILD index 851d391961e3..0d2eef004c59 100644 --- a/source/extensions/filters/http/gzip/BUILD +++ b/source/extensions/filters/http/gzip/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -28,10 +29,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/header_to_metadata/BUILD b/source/extensions/filters/http/header_to_metadata/BUILD index d67a3115f3da..df3b285c0f1c 100644 --- a/source/extensions/filters/http/header_to_metadata/BUILD +++ b/source/extensions/filters/http/header_to_metadata/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -23,10 +24,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index e53a9f28a14b..2bd6a94e3abe 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -32,10 +33,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/http:header_utility_lib", diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD index 65cfd0b51e3b..aab10c1124bd 100644 --- a/source/extensions/filters/http/ip_tagging/BUILD +++ b/source/extensions/filters/http/ip_tagging/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -27,10 +28,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index 8ff58827e46c..01b3f1fb99e4 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + 
"envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -63,10 +64,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["filter_factory.cc"], hdrs = ["filter_factory.h"], + security_posture = "robust_to_untrusted_downstream", + status = "alpha", deps = [ ":filter_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index 2dbdf45d0fa0..7a6fd15165fa 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -45,10 +46,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/original_src/BUILD b/source/extensions/filters/http/original_src/BUILD index 30ac140d4963..c8ac0ba12bbc 100644 --- a/source/extensions/filters/http/original_src/BUILD +++ b/source/extensions/filters/http/original_src/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -32,10 +33,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", # The extension build system requires a library named config srcs = ["original_src_config_factory.cc"], hdrs = ["original_src_config_factory.h"], + security_posture = "robust_to_untrusted_downstream", + status = "alpha", deps = [ ":config_lib", ":original_src_lib", diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index d64411c12a49..196f7147847d 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -29,10 +30,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":ratelimit_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/http/rbac/BUILD b/source/extensions/filters/http/rbac/BUILD index 427d145a8bde..a8dfd2ac9481 100644 --- a/source/extensions/filters/http/rbac/BUILD +++ b/source/extensions/filters/http/rbac/BUILD @@ -2,16 +2,18 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/router/BUILD b/source/extensions/filters/http/router/BUILD index 009507b535c8..c60f6e8fcedf 100644 --- a/source/extensions/filters/http/router/BUILD +++ b/source/extensions/filters/http/router/BUILD @@ -5,16 +5,17 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( 
+envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/router:router_lib", diff --git a/source/extensions/filters/http/squash/BUILD b/source/extensions/filters/http/squash/BUILD index 14d2c4563b96..0cb4ef57c39d 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/source/extensions/filters/http/squash/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -31,10 +32,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/http/tap/BUILD b/source/extensions/filters/http/tap/BUILD index 1766624a9cef..53a766186a7c 100644 --- a/source/extensions/filters/http/tap/BUILD +++ b/source/extensions/filters/http/tap/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -46,10 +47,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", deps = [ ":tap_config_impl", ":tap_filter_lib", diff --git a/source/extensions/filters/listener/http_inspector/BUILD b/source/extensions/filters/listener/http_inspector/BUILD index 6a517ad57875..70b0ca17d48d 100644 --- a/source/extensions/filters/listener/http_inspector/BUILD +++ b/source/extensions/filters/listener/http_inspector/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -27,9 +28,10 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ ":http_inspector_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/listener/original_dst/BUILD b/source/extensions/filters/listener/original_dst/BUILD index b3843b7744dd..a4540a17a0f0 100644 --- a/source/extensions/filters/listener/original_dst/BUILD +++ b/source/extensions/filters/listener/original_dst/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -24,9 +25,10 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":original_dst_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/listener/original_src/BUILD b/source/extensions/filters/listener/original_src/BUILD index 863d2010f930..b5e54f2b7266 100644 --- a/source/extensions/filters/listener/original_src/BUILD +++ b/source/extensions/filters/listener/original_src/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -35,10 +36,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", # The extension build system requires a library named config srcs = ["original_src_config_factory.cc"], hdrs = 
["original_src_config_factory.h"], + security_posture = "robust_to_untrusted_downstream", + status = "alpha", deps = [ ":config_lib", ":original_src_lib", diff --git a/source/extensions/filters/listener/proxy_protocol/BUILD b/source/extensions/filters/listener/proxy_protocol/BUILD index 5bbefff20f0d..67842549fe94 100644 --- a/source/extensions/filters/listener/proxy_protocol/BUILD +++ b/source/extensions/filters/listener/proxy_protocol/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -31,9 +32,10 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/listener/tls_inspector/BUILD b/source/extensions/filters/listener/tls_inspector/BUILD index af90ed9fcd4a..9f3a9d06bd5d 100644 --- a/source/extensions/filters/listener/tls_inspector/BUILD +++ b/source/extensions/filters/listener/tls_inspector/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -28,9 +29,10 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/network/client_ssl_auth/BUILD b/source/extensions/filters/network/client_ssl_auth/BUILD index 32c73f52f8e0..c469ebba4df7 100644 --- a/source/extensions/filters/network/client_ssl_auth/BUILD +++ b/source/extensions/filters/network/client_ssl_auth/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -35,10 +36,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":client_ssl_auth", "//include/envoy/registry", diff --git a/source/extensions/filters/network/dubbo_proxy/BUILD b/source/extensions/filters/network/dubbo_proxy/BUILD index f691ad914c73..9579275aee20 100644 --- a/source/extensions/filters/network/dubbo_proxy/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -99,10 +100,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", deps = [ ":conn_manager_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/network/echo/BUILD b/source/extensions/filters/network/echo/BUILD index 253cfb55935b..a3703ea04319 100644 --- a/source/extensions/filters/network/echo/BUILD +++ b/source/extensions/filters/network/echo/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -24,9 +25,10 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], + security_posture = "unknown", deps = [ ":echo", 
"//include/envoy/registry", diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index 082925e49861..68b53f7dbdba 100644 --- a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -29,10 +30,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index eaacb34f0fb0..3e0cf154ee1a 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -6,16 +6,17 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/config:config_provider_manager_interface", "//include/envoy/filesystem:filesystem_interface", diff --git a/source/extensions/filters/network/kafka/BUILD b/source/extensions/filters/network/kafka/BUILD index 8ff831a0274c..68c595608d31 100644 --- a/source/extensions/filters/network/kafka/BUILD +++ b/source/extensions/filters/network/kafka/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -22,12 +23,14 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "kafka_request_codec_lib", srcs = ["request_codec.cc"], hdrs = [ "request_codec.h", ], + security_posture = "requires_trusted_downstream_and_upstream", + status = "wip", deps = [ ":abstract_codec_lib", ":kafka_request_parser_lib", diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index 09c091efaf0a..7ac9ea6095b5 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -102,10 +103,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ ":proxy_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/network/mysql_proxy/BUILD b/source/extensions/filters/network/mysql_proxy/BUILD index d8f44c28216e..a39085adfcd0 100644 --- a/source/extensions/filters/network/mysql_proxy/BUILD +++ b/source/extensions/filters/network/mysql_proxy/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -47,10 +48,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["mysql_config.cc"], hdrs = ["mysql_config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status 
= "alpha", deps = [ ":proxy_lib", "//source/extensions/filters/network:well_known_names", diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 3e77bcefedfc..8e45eaa9b9b2 100644 --- a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -27,10 +28,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/network/rbac/BUILD b/source/extensions/filters/network/rbac/BUILD index 57ce3bb972b8..f1f202df90cf 100644 --- a/source/extensions/filters/network/rbac/BUILD +++ b/source/extensions/filters/network/rbac/BUILD @@ -2,16 +2,18 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":rbac_filter", "//include/envoy/registry", diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index e360528a86f7..0331264fe496 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -114,10 +115,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ "//include/envoy/upstream:upstream_interface", "//source/extensions/common/redis:redirection_mgr_lib", diff --git a/source/extensions/filters/network/sni_cluster/BUILD b/source/extensions/filters/network/sni_cluster/BUILD index 60eec7e5c92b..aa8918d1abc5 100644 --- a/source/extensions/filters/network/sni_cluster/BUILD +++ b/source/extensions/filters/network/sni_cluster/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -21,10 +22,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "unknown", deps = [ ":sni_cluster", "//include/envoy/registry", diff --git a/source/extensions/filters/network/tcp_proxy/BUILD b/source/extensions/filters/network/tcp_proxy/BUILD index bd719267d0b4..4ff4d71b0c4a 100644 --- a/source/extensions/filters/network/tcp_proxy/BUILD +++ b/source/extensions/filters/network/tcp_proxy/BUILD @@ -5,16 +5,17 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ "//include/envoy/registry", "//source/common/tcp_proxy", diff --git a/source/extensions/filters/network/thrift_proxy/BUILD 
b/source/extensions/filters/network/thrift_proxy/BUILD index 7ed10d835209..67b3bb4b93fc 100644 --- a/source/extensions/filters/network/thrift_proxy/BUILD +++ b/source/extensions/filters/network/thrift_proxy/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -30,10 +31,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", deps = [ ":app_exception_lib", ":auto_protocol_lib", diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 74d12df6979e..409d28e400fb 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -26,10 +27,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", deps = [ ":ratelimit_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/network/thrift_proxy/router/BUILD b/source/extensions/filters/network/thrift_proxy/router/BUILD index d11bc2541c16..76502fffbb7a 100644 --- a/source/extensions/filters/network/thrift_proxy/router/BUILD +++ b/source/extensions/filters/network/thrift_proxy/router/BUILD @@ -2,16 +2,19 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", deps = [ ":router_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD b/source/extensions/filters/network/zookeeper_proxy/BUILD index fdbcdcdfa635..66290d5def54 100644 --- a/source/extensions/filters/network/zookeeper_proxy/BUILD +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -5,6 +5,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -37,10 +38,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", deps = [ ":proxy_lib", "//source/extensions/filters/network:well_known_names", diff --git a/source/extensions/grpc_credentials/aws_iam/BUILD b/source/extensions/grpc_credentials/aws_iam/BUILD index 460e0a824f7d..89f6b2b3806d 100644 --- a/source/extensions/grpc_credentials/aws_iam/BUILD +++ b/source/extensions/grpc_credentials/aws_iam/BUILD @@ -4,17 +4,19 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], external_deps = ["grpc"], + security_posture = "data_plane_agnostic", + status = "alpha", deps = [ 
"//include/envoy/grpc:google_grpc_creds_interface", "//include/envoy/registry", diff --git a/source/extensions/grpc_credentials/file_based_metadata/BUILD b/source/extensions/grpc_credentials/file_based_metadata/BUILD index 8f638758a1d2..814508881338 100644 --- a/source/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/source/extensions/grpc_credentials/file_based_metadata/BUILD @@ -4,17 +4,19 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], external_deps = ["grpc"], + security_posture = "data_plane_agnostic", + status = "alpha", deps = [ "//include/envoy/grpc:google_grpc_creds_interface", "//include/envoy/registry", diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index 8789e5a1adbf..ad7e4b963528 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -23,10 +24,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "requires_trusted_downstream_and_upstream", deps = [ ":redis", ":utility", diff --git a/source/extensions/resource_monitors/fixed_heap/BUILD b/source/extensions/resource_monitors/fixed_heap/BUILD index 681846f1c813..e54cfe813179 100644 --- a/source/extensions/resource_monitors/fixed_heap/BUILD +++ b/source/extensions/resource_monitors/fixed_heap/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -20,10 +21,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "data_plane_agnostic", + status = "alpha", deps = [ ":fixed_heap_monitor", "//include/envoy/registry", diff --git a/source/extensions/resource_monitors/injected_resource/BUILD b/source/extensions/resource_monitors/injected_resource/BUILD index 21ed8f0128e8..650d87c69b98 100644 --- a/source/extensions/resource_monitors/injected_resource/BUILD +++ b/source/extensions/resource_monitors/injected_resource/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -21,10 +22,12 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "data_plane_agnostic", + status = "alpha", deps = [ ":injected_resource_monitor", "//include/envoy/registry", diff --git a/source/extensions/retry/host/omit_canary_hosts/BUILD b/source/extensions/retry/host/omit_canary_hosts/BUILD index 5ee8c65978a0..8c9eebf2c0f6 100644 --- a/source/extensions/retry/host/omit_canary_hosts/BUILD +++ b/source/extensions/retry/host/omit_canary_hosts/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -16,10 +17,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ 
":omit_canary_hosts_predicate_lib", "//include/envoy/registry", diff --git a/source/extensions/retry/host/previous_hosts/BUILD b/source/extensions/retry/host/previous_hosts/BUILD index f0e584c39a9e..b6fec5f4799e 100644 --- a/source/extensions/retry/host/previous_hosts/BUILD +++ b/source/extensions/retry/host/previous_hosts/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -16,10 +17,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":previous_hosts_predicate_lib", "//include/envoy/registry", diff --git a/source/extensions/retry/priority/previous_priorities/BUILD b/source/extensions/retry/priority/previous_priorities/BUILD index 8c9cad99cbd2..1a545c2509f0 100644 --- a/source/extensions/retry/priority/previous_priorities/BUILD +++ b/source/extensions/retry/priority/previous_priorities/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -18,10 +19,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", deps = [ ":previous_priorities_lib", "//include/envoy/registry", diff --git a/source/extensions/stat_sinks/dog_statsd/BUILD b/source/extensions/stat_sinks/dog_statsd/BUILD index 07d7b4ab317b..b64d07e9ddc2 100644 --- a/source/extensions/stat_sinks/dog_statsd/BUILD +++ b/source/extensions/stat_sinks/dog_statsd/BUILD @@ -5,16 +5,17 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", + "envoy_cc_extension", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "data_plane_agnostic", deps = [ "//include/envoy/registry", "//source/common/network:address_lib", diff --git a/source/extensions/stat_sinks/hystrix/BUILD b/source/extensions/stat_sinks/hystrix/BUILD index 541ad06838af..eb38015ed6d4 100644 --- a/source/extensions/stat_sinks/hystrix/BUILD +++ b/source/extensions/stat_sinks/hystrix/BUILD @@ -4,16 +4,18 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) envoy_package() -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "data_plane_agnostic", deps = [ "//include/envoy/registry", "//source/common/network:address_lib", diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index 037227d6c447..495877790e7a 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -4,6 +4,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -36,10 +37,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], + security_posture = "data_plane_agnostic", deps = [ "//include/envoy/registry", "//source/common/common:assert_lib", diff --git a/source/extensions/stat_sinks/statsd/BUILD b/source/extensions/stat_sinks/statsd/BUILD index af47dfef1a27..de13e0c24530 
--- a/source/extensions/stat_sinks/statsd/BUILD
+++ b/source/extensions/stat_sinks/statsd/BUILD
@@ -4,16 +4,17 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
-    "envoy_cc_library",
+    "envoy_cc_extension",
     "envoy_package",
 )
 
 envoy_package()
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "data_plane_agnostic",
     deps = [
         "//include/envoy/registry",
         "//source/common/network:address_lib",
diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD
index c3db885e11e4..be0a767fdced 100644
--- a/source/extensions/tracers/datadog/BUILD
+++ b/source/extensions/tracers/datadog/BUILD
@@ -4,6 +4,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -27,10 +28,11 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream",
     deps = [
         ":datadog_tracer_lib",
         "//source/extensions/tracers:well_known_names",
diff --git a/source/extensions/tracers/dynamic_ot/BUILD b/source/extensions/tracers/dynamic_ot/BUILD
index 399f7b9338f7..c14501cf333a 100644
--- a/source/extensions/tracers/dynamic_ot/BUILD
+++ b/source/extensions/tracers/dynamic_ot/BUILD
@@ -4,6 +4,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -24,10 +25,11 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream",
     deps = [
         ":dynamic_opentracing_driver_lib",
         "//source/extensions/tracers:well_known_names",
diff --git a/source/extensions/tracers/lightstep/BUILD b/source/extensions/tracers/lightstep/BUILD
index 449f81692165..5e5213c600be 100644
--- a/source/extensions/tracers/lightstep/BUILD
+++ b/source/extensions/tracers/lightstep/BUILD
@@ -4,6 +4,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -29,10 +30,11 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream",
     deps = [
         ":lightstep_tracer_lib",
         "//source/extensions/tracers:well_known_names",
diff --git a/source/extensions/tracers/opencensus/BUILD b/source/extensions/tracers/opencensus/BUILD
index 198c98b2b6f8..0a4b250e88f1 100644
--- a/source/extensions/tracers/opencensus/BUILD
+++ b/source/extensions/tracers/opencensus/BUILD
@@ -4,16 +4,18 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
 
 envoy_package()
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream",
     deps = [
         ":opencensus_tracer_impl",
         "//source/extensions/tracers:well_known_names",
diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD
index a3521c6fff4c..3440de74b856 100644
--- a/source/extensions/tracers/xray/BUILD
+++ b/source/extensions/tracers/xray/BUILD
@@ -4,6 +4,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -34,10 +35,12 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream",
+    status = "wip",
     deps = [
         ":xray_lib",
         "//source/common/config:datasource_lib",
diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD
index fd1c72845e30..b645740e4526 100644
--- a/source/extensions/tracers/zipkin/BUILD
+++ b/source/extensions/tracers/zipkin/BUILD
@@ -4,6 +4,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -62,10 +63,11 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream",
     deps = [
         ":zipkin_lib",
         "//source/extensions/tracers:well_known_names",
diff --git a/source/extensions/transport_sockets/alts/BUILD b/source/extensions/transport_sockets/alts/BUILD
index e860bd929975..fb19c24c2b91 100644
--- a/source/extensions/transport_sockets/alts/BUILD
+++ b/source/extensions/transport_sockets/alts/BUILD
@@ -5,6 +5,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -25,7 +26,7 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = [
         "config.cc",
@@ -33,6 +34,7 @@ envoy_cc_library(
     hdrs = [
         "config.h",
     ],
+    security_posture = "robust_to_untrusted_downstream_and_upstream",
    deps = [
         ":tsi_handshaker",
         ":tsi_socket",
diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD
index 5de3de434029..81cd27b5a497 100644
--- a/source/extensions/transport_sockets/tap/BUILD
+++ b/source/extensions/transport_sockets/tap/BUILD
@@ -4,6 +4,7 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
@@ -44,10 +45,12 @@ envoy_cc_library(
     ],
 )
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "requires_trusted_downstream_and_upstream",
+    status = "alpha",
     deps = [
         ":tap_config_impl",
         ":tap_lib",
diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD
index 2d515d0d75a6..6ccc256a98bf 100644
--- a/source/extensions/transport_sockets/tls/BUILD
+++ b/source/extensions/transport_sockets/tls/BUILD
@@ -4,16 +4,18 @@ licenses(["notice"])  # Apache 2
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_extension",
     "envoy_cc_library",
     "envoy_package",
 )
 
 envoy_package()
 
-envoy_cc_library(
+envoy_cc_extension(
     name = "config",
     srcs = ["config.cc"],
     hdrs = ["config.h"],
+    security_posture = "robust_to_untrusted_downstream_and_upstream",
     deps = [
         ":ssl_socket_lib",
         "//include/envoy/network:transport_socket_interface",
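
The remaining hunks teach the proto documentation tooling about extensions. For orientation, the snippet below shows the kind of file-level proto comment annotation that tooling consumes, parsed with a standalone regex written in the spirit of the api_proto_plugin annotation syntax; the sample comment text and the regex itself are illustrative assumptions rather than lines from this patch.

# Illustrative only: a file-level proto comment carrying an 'extension'
# annotation, and a regex similar in spirit to the api_proto_plugin
# annotation syntax. The comment contents are made up for this example.
import re

leading_comment = """
[#protodoc-title: Thrift Proxy]
Thrift proxy :ref:`configuration overview <config_network_filters_thrift_proxy>`.
[#extension: envoy.filters.network.thrift_proxy]
"""

annotation_re = re.compile(r'\[#([\w-]+?):(.*?)\]')
found = {name: value.strip() for name, value in annotation_re.findall(leading_comment)}
print(found['extension'])  # envoy.filters.network.thrift_proxy
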
diff --git a/tools/api_proto_plugin/annotations.py b/tools/api_proto_plugin/annotations.py
index 6c5775bc721b..48452742125a 100644
--- a/tools/api_proto_plugin/annotations.py
+++ b/tools/api_proto_plugin/annotations.py
@@ -10,6 +10,11 @@
 # Page/section titles with special prefixes in the proto comments
 DOC_TITLE_ANNOTATION = 'protodoc-title'
 
+# When documenting an extension, this should be used to specify the qualified
+# name that the extension registers as in the static registry, e.g.
+# envoy.filters.network.http_connection_manager.
+EXTENSION_ANNOTATION = 'extension'
+
 # Not implemented yet annotation on leading comments, leading to hiding of
 # field.
 NOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide'
@@ -26,6 +31,7 @@
 
 VALID_ANNOTATIONS = set([
     DOC_TITLE_ANNOTATION,
+    EXTENSION_ANNOTATION,
     NOT_IMPLEMENTED_HIDE_ANNOTATION,
     NEXT_FREE_FIELD_ANNOTATION,
     NEXT_MAJOR_VERSION_ANNOTATION,
diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD
index d2c9b12a6727..2bd707940b41 100644
--- a/tools/protodoc/BUILD
+++ b/tools/protodoc/BUILD
@@ -1,5 +1,13 @@
 licenses(["notice"])  # Apache 2
 
+py_binary(
+    name = "generate_empty",
+    srcs = ["generate_empty.py"],
+    python_version = "PY3",
+    visibility = ["//visibility:public"],
+    deps = [":protodoc"],
+)
+
 py_binary(
     name = "protodoc",
     srcs = ["protodoc.py"],
diff --git a/tools/protodoc/generate_empty.py b/tools/protodoc/generate_empty.py
new file mode 100644
index 000000000000..dbb4c7a9ffc9
--- /dev/null
+++ b/tools/protodoc/generate_empty.py
@@ -0,0 +1,48 @@
+# Generate pseudo API docs for extensions that have google.protobuf.Empty
+# config.
+
+import json
+import pathlib
+import string
+import sys
+
+import protodoc
+
+EMPTY_EXTENSION_DOCS_TEMPLATE = string.Template("""$header
+
+$description
+
+$reflink
+
+This extension does not have a structured configuration, `google.protobuf.Empty
+<https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Empty>`_ should be used
+instead.
+
+$extension
+""")
+
+
+def GenerateEmptyExtensionsDocs(extension, details, api_extensions_root):
+  extension_root = pathlib.Path(details['path'])
+  path = pathlib.Path(api_extensions_root, extension_root, 'empty', extension_root.name + '.rst')
+  path.parent.mkdir(parents=True, exist_ok=True)
+  description = details.get('description', '')
+  reflink = ''
+  if 'ref' in details:
+    reflink = '%s %s.' % (details['title'],
+                          protodoc.FormatInternalLink('configuration overview', details['ref']))
+  content = EMPTY_EXTENSION_DOCS_TEMPLATE.substitute(
+      header=protodoc.FormatHeader('=', details['title']),
+      description=description,
+      reflink=reflink,
+      extension=protodoc.FormatExtension(extension))
+  path.write_text(content)
+
+
+if __name__ == '__main__':
+  empty_extensions_path = sys.argv[1]
+  api_extensions_root = sys.argv[2]
+
+  empty_extensions = json.loads(pathlib.Path(empty_extensions_path).read_text())
+  for extension, details in empty_extensions.items():
+    GenerateEmptyExtensionsDocs(extension, details, api_extensions_root)
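
tools/protodoc/generate_empty.py is driven by a JSON file mapping extension names to doc details. Beyond the keys the script reads above (path, title, description, ref), the schema is not spelled out in this patch, so the input below is a hypothetical example (the extension name and paths are invented) showing the intended flow.

# Hypothetical input for tools/protodoc/generate_empty.py, using only the keys
# the script reads; the extension name and paths below are made up.
import json
import pathlib

empty_extensions = {
    "envoy.filters.http.some_empty_filter": {
        "path": "extensions/filters/http/some_empty_filter",
        "title": "Some empty filter",
        "description": "A filter configured with google.protobuf.Empty.",
        "ref": "config_http_filters_some_empty_filter",
    }
}
pathlib.Path("empty_extensions.json").write_text(json.dumps(empty_extensions))

# Then, with EXTENSION_DB_PATH set so protodoc.FormatExtension can resolve the
# extension's metadata:
#   python tools/protodoc/generate_empty.py empty_extensions.json generated/api_docs
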
diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py
index 5c375392fd6f..eb9dbda8ceed 100755
--- a/tools/protodoc/protodoc.py
+++ b/tools/protodoc/protodoc.py
@@ -4,9 +4,12 @@
 # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax.
 
 from collections import defaultdict
+import json
 import functools
 import os
+import pathlib
 import re
+import string
 
 from tools.api_proto_plugin import annotations
 from tools.api_proto_plugin import plugin
@@ -33,6 +36,46 @@
 DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format(
     os.environ['ENVOY_BLOB_SHA'])
 
+# Template for formatting extension descriptions.
+EXTENSION_TEMPLATE = string.Template("""$anchor
+This extension may be referenced by the qualified name *$extension*
+
+.. note::
+  $status
+
+  $security_posture
+
+""")
+
+# A map from the extension security postures (as defined in the
+# envoy_cc_extension build macro) to human readable text for extension docs.
+EXTENSION_SECURITY_POSTURES = {
+    'robust_to_untrusted_downstream':
+        'This extension is intended to be robust against untrusted downstream traffic. It '
+        'assumes that the upstream is trusted.',
+    'robust_to_untrusted_downstream_and_upstream':
+        'This extension is intended to be robust against both untrusted downstream and '
+        'upstream traffic.',
+    'requires_trusted_downstream_and_upstream':
+        'This extension is not hardened and should only be used in deployments'
+        ' where both the downstream and upstream are trusted.',
+    'unknown':
+        'This extension has an unknown security posture and should only be '
+        'used in deployments where both the downstream and upstream are '
+        'trusted.',
+    'data_plane_agnostic':
+        'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.',
+}
+
+# A map from the extension status value to a human readable text for extension
+# docs.
+EXTENSION_STATUS_VALUES = {
+    'alpha':
+        'This extension is functional but has not had substantial production burn time, use only with this caveat.',
+    'wip':
+        'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.',
+}
+
 
 class ProtodocError(Exception):
   """Base error class for the protodoc module."""
@@ -69,7 +112,11 @@ def FormatCommentWithAnnotations(comment, type_name=''):
   Returns:
     A string with additional RST from annotations.
   """
-  return annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n')
+  formatted_extension = ''
+  if annotations.EXTENSION_ANNOTATION in comment.annotations:
+    extension = comment.annotations[annotations.EXTENSION_ANNOTATION]
+    formatted_extension = FormatExtension(extension)
+  return annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') + formatted_extension
 
 
 def MapLines(f, s):
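
The FormatExtension helper added in the next hunk resolves an extension against a JSON database whose path arrives via the EXTENSION_DB_PATH environment variable. Generating that database is outside this patch; the snippet below only illustrates the entry shape FormatExtension appears to expect (keyed by qualified extension name, with security_posture and status fields). The file name is made up and the qualified name of the X-Ray tracer is assumed, with values mirroring its BUILD change earlier in this patch.

# Assumed shape of the extension metadata database consumed via
# EXTENSION_DB_PATH; the file name and the qualified extension name are
# illustrative, the values mirror the xray BUILD change above.
import json
import pathlib

extension_db = {
    "envoy.tracers.xray": {
        "security_posture": "robust_to_untrusted_downstream",
        "status": "wip",
    },
}
db_path = pathlib.Path("extension_db.json")
db_path.write_text(json.dumps(extension_db))

metadata = json.loads(db_path.read_text())["envoy.tracers.xray"]
print(metadata["security_posture"], metadata["status"])
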
+ """ + extension_metadata = json.loads(pathlib.Path( + os.getenv('EXTENSION_DB_PATH')).read_text())[extension] + anchor = FormatAnchor('extension_' + extension) + status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') + security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] + return EXTENSION_TEMPLATE.substitute(anchor=anchor, + extension=extension, + status=status, + security_posture=security_posture) + + def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title @@ -131,11 +198,15 @@ def FormatHeaderFromFile(style, source_code_info, proto_name): anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) + formatted_extension = '' + if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: + extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] + formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( - style, - source_code_info.file_level_annotations[annotations.DOC_TITLE_ANNOTATION]), stripped_comment - return anchor + FormatHeader(style, proto_name), stripped_comment + style, source_code_info.file_level_annotations[ + annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment + return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field):