From b3042ee5ef144875779997e509e46f82d1840fb6 Mon Sep 17 00:00:00 2001 From: Harvey Tuch Date: Tue, 17 Sep 2019 16:18:16 -0400 Subject: [PATCH 1/3] api: protoxform tool and API reformat. This patch introduces a new tool, protoxform, that will be the basis of the v2 -> v3 migration tooling. It operates as a Python protoc plugin, within the same framework as protodoc, and provides the ability to operate on protoc AST input and generate proto output. As a first step, the tool is applied reflexively on v2, and functions as a formatting tool. In later patches, this will be added to check_format/fix_format scripts and CI. Risk level: medium (it's possible that some inadvertent wire changes occur, if they do, this patch should be rolled back). Testing: manual inspection of diff, bazel test //test/..., some grep/diff scripts to ensure we haven't lost any comments. Signed-off-by: Harvey Tuch --- api/envoy/admin/v2alpha/certs.proto | 3 +- api/envoy/admin/v2alpha/clusters.proto | 12 +- api/envoy/admin/v2alpha/config_dump.proto | 42 +- api/envoy/admin/v2alpha/listeners.proto | 2 +- api/envoy/admin/v2alpha/memory.proto | 1 - api/envoy/admin/v2alpha/metrics.proto | 1 + api/envoy/admin/v2alpha/mutex_stats.proto | 1 - api/envoy/admin/v2alpha/server_info.proto | 47 +- api/envoy/admin/v2alpha/tap.proto | 11 +- api/envoy/api/v2/auth/cert.proto | 50 +- api/envoy/api/v2/cds.proto | 565 +++++++++-------- .../api/v2/cluster/circuit_breaker.proto | 3 - api/envoy/api/v2/cluster/filter.proto | 4 +- .../api/v2/cluster/outlier_detection.proto | 26 +- api/envoy/api/v2/core/address.proto | 25 +- api/envoy/api/v2/core/base.proto | 137 ++-- api/envoy/api/v2/core/config_source.proto | 16 +- api/envoy/api/v2/core/grpc_service.proto | 28 +- api/envoy/api/v2/core/health_check.proto | 159 +++-- api/envoy/api/v2/core/http_uri.proto | 11 +- api/envoy/api/v2/core/protocol.proto | 6 +- api/envoy/api/v2/eds.proto | 42 +- api/envoy/api/v2/endpoint/endpoint.proto | 23 +- api/envoy/api/v2/lds.proto | 58 +- api/envoy/api/v2/listener/listener.proto | 44 +- api/envoy/api/v2/listener/quic_config.proto | 4 +- .../api/v2/listener/udp_listener_config.proto | 4 +- api/envoy/api/v2/ratelimit/ratelimit.proto | 6 +- api/envoy/api/v2/rds.proto | 7 +- api/envoy/api/v2/route/route.proto | 595 +++++++++--------- api/envoy/api/v2/srds.proto | 24 +- api/envoy/config/accesslog/v2/als.proto | 10 +- api/envoy/config/accesslog/v2/file.proto | 5 +- api/envoy/config/bootstrap/v2/bootstrap.proto | 85 +-- .../v2alpha/cluster.proto | 4 +- .../config/cluster/redis/redis_cluster.proto | 4 +- .../v2alpha/dns_cache.proto | 11 +- .../config/common/tap/v2alpha/common.proto | 17 +- .../filter/accesslog/v2/accesslog.proto | 81 ++- api/envoy/config/filter/fault/v2/fault.proto | 16 +- .../config/filter/http/buffer/v2/buffer.proto | 6 +- .../config/filter/http/csrf/v2/csrf.proto | 10 +- .../v2alpha/dynamic_forward_proxy.proto | 2 +- .../filter/http/ext_authz/v2/ext_authz.proto | 28 +- .../config/filter/http/fault/v2/fault.proto | 4 +- .../config/filter/http/gzip/v2/gzip.proto | 35 +- .../v2/header_to_metadata.proto | 5 +- .../http/health_check/v2/health_check.proto | 17 +- .../http/ip_tagging/v2/ip_tagging.proto | 11 +- .../http/jwt_authn/v2alpha/config.proto | 19 +- api/envoy/config/filter/http/lua/v2/lua.proto | 2 +- .../original_src/v2alpha1/original_src.proto | 1 - .../http/rate_limit/v2/rate_limit.proto | 8 +- .../config/filter/http/rbac/v2/rbac.proto | 2 - .../config/filter/http/router/v2/router.proto | 20 +- .../config/filter/http/squash/v2/squash.proto | 2 +- 
.../config/filter/http/tap/v2alpha/tap.proto | 10 +- .../http/transcoder/v2/transcoder.proto | 44 +- .../original_src/v2alpha1/original_src.proto | 1 - .../client_ssl_auth/v2/client_ssl_auth.proto | 7 +- .../dubbo_proxy/v2alpha1/dubbo_proxy.proto | 32 +- .../network/dubbo_proxy/v2alpha1/route.proto | 16 +- .../network/ext_authz/v2/ext_authz.proto | 4 +- .../v2/http_connection_manager.proto | 323 +++++----- .../network/mongo_proxy/v2/mongo_proxy.proto | 4 +- .../network/rate_limit/v2/rate_limit.proto | 12 +- .../config/filter/network/rbac/v2/rbac.proto | 24 +- .../network/redis_proxy/v2/redis_proxy.proto | 118 ++-- .../network/tcp_proxy/v2/tcp_proxy.proto | 116 ++-- .../network/thrift_proxy/v2alpha1/route.proto | 20 +- .../thrift_proxy/v2alpha1/thrift_proxy.proto | 54 +- .../rate_limit/v2alpha1/rate_limit.proto | 8 +- .../grpc_credential/v2alpha/aws_iam.proto | 10 +- .../v2alpha/file_based_metadata.proto | 9 +- .../config/metrics/v2/metrics_service.proto | 6 +- api/envoy/config/metrics/v2/stats.proto | 23 +- .../config/overload/v2alpha/overload.proto | 13 +- api/envoy/config/ratelimit/v2/rls.proto | 6 +- api/envoy/config/rbac/v2/rbac.proto | 46 +- .../fixed_heap/v2alpha/fixed_heap.proto | 2 +- .../v2alpha/injected_resource.proto | 2 +- api/envoy/config/trace/v2/trace.proto | 102 +-- .../transport_socket/tap/v2alpha/tap.proto | 10 +- api/envoy/data/accesslog/v2/accesslog.proto | 80 ++- .../v2alpha/outlier_detection_event.proto | 85 ++- .../core/v2alpha/health_check_event.proto | 44 +- api/envoy/data/tap/v2alpha/transport.proto | 20 +- api/envoy/data/tap/v2alpha/wrapper.proto | 10 +- api/envoy/service/accesslog/v2/als.proto | 20 +- .../service/auth/v2/attribute_context.proto | 4 +- api/envoy/service/auth/v2/external_auth.proto | 12 +- api/envoy/service/discovery/v2/ads.proto | 18 +- api/envoy/service/discovery/v2/rtds.proto | 22 +- api/envoy/service/ratelimit/v2/rls.proto | 16 +- api/envoy/service/tap/v2alpha/common.proto | 32 +- api/envoy/type/http_status.proto | 54 +- api/envoy/type/matcher/metadata.proto | 8 +- api/envoy/type/matcher/number.proto | 2 +- api/envoy/type/matcher/regex.proto | 5 +- api/envoy/type/matcher/string.proto | 10 +- api/envoy/type/percent.proto | 10 +- tools/api_proto_plugin/plugin.py | 5 + tools/api_proto_plugin/traverse.py | 20 +- tools/api_proto_plugin/type_context.py | 58 ++ tools/api_proto_plugin/visitor.py | 15 +- tools/proto_format.sh | 37 ++ tools/protodoc/protodoc.py | 2 +- tools/protoxform/BUILD | 14 + tools/protoxform/protoxform.bzl | 95 +++ tools/protoxform/protoxform.py | 405 ++++++++++++ 110 files changed, 2663 insertions(+), 1824 deletions(-) create mode 100755 tools/proto_format.sh create mode 100644 tools/protoxform/BUILD create mode 100644 tools/protoxform/protoxform.bzl create mode 100755 tools/protoxform/protoxform.py diff --git a/api/envoy/admin/v2alpha/certs.proto b/api/envoy/admin/v2alpha/certs.proto index c6d5e4154aed..04f78ceed142 100644 --- a/api/envoy/admin/v2alpha/certs.proto +++ b/api/envoy/admin/v2alpha/certs.proto @@ -19,7 +19,6 @@ message Certificates { } message Certificate { - // Details of CA certificate. repeated CertificateDetails ca_cert = 1; @@ -48,10 +47,10 @@ message CertificateDetails { } message SubjectAlternateName { - // Subject Alternate Name. 
oneof name { string dns = 1; + string uri = 2; } } diff --git a/api/envoy/admin/v2alpha/clusters.proto b/api/envoy/admin/v2alpha/clusters.proto index cc2c95110c6a..8119821e9dc9 100644 --- a/api/envoy/admin/v2alpha/clusters.proto +++ b/api/envoy/admin/v2alpha/clusters.proto @@ -45,7 +45,7 @@ message ClusterStatus { // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. - envoy.type.Percent success_rate_ejection_threshold = 3; + type.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. repeated HostStatus host_statuses = 4; @@ -65,13 +65,13 @@ message ClusterStatus { // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. - envoy.type.Percent local_origin_success_rate_ejection_threshold = 5; + type.Percent local_origin_success_rate_ejection_threshold = 5; } // Current state of a particular host. message HostStatus { // Address of this host. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; // List of stats specific to this host. repeated SimpleMetric stats = 2; @@ -92,7 +92,7 @@ message HostStatus { // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. - envoy.type.Percent success_rate = 4; + type.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. uint32 weight = 5; @@ -115,7 +115,7 @@ message HostStatus { // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. - envoy.type.Percent local_origin_success_rate = 8; + type.Percent local_origin_success_rate = 8; } // Health status for a host. @@ -139,5 +139,5 @@ message HostHealthStatus { // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // TODO(mrice32): pipe through remaining EDS health status possibilities. - envoy.api.v2.core.HealthStatus eds_health_status = 3; + api.v2.core.HealthStatus eds_health_status = 3; } diff --git a/api/envoy/admin/v2alpha/config_dump.proto b/api/envoy/admin/v2alpha/config_dump.proto index 10c57f06f1b7..15812a9ffd4d 100644 --- a/api/envoy/admin/v2alpha/config_dump.proto +++ b/api/envoy/admin/v2alpha/config_dump.proto @@ -39,7 +39,7 @@ message ConfigDump { // the static portions of an Envoy configuration by reusing the output as the bootstrap // configuration for another Envoy. message BootstrapConfigDump { - envoy.config.bootstrap.v2.Bootstrap bootstrap = 1; + config.bootstrap.v2.Bootstrap bootstrap = 1; // The timestamp when the BootstrapConfig was last updated. google.protobuf.Timestamp last_updated = 2; @@ -49,15 +49,10 @@ message BootstrapConfigDump { // configuration information can be used to recreate an Envoy configuration by populating all // listeners as static listeners or by returning them in a LDS response. message ListenersConfigDump { - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - // Describes a statically loaded listener. 
message StaticListener { // The listener config. - envoy.api.v2.Listener listener = 1; + api.v2.Listener listener = 1; // The timestamp when the Listener was last updated. google.protobuf.Timestamp last_updated = 2; @@ -72,12 +67,17 @@ message ListenersConfigDump { string version_info = 1; // The listener config. - envoy.api.v2.Listener listener = 2; + api.v2.Listener listener = 2; // The timestamp when the Listener was last updated. google.protobuf.Timestamp last_updated = 3; } + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + string version_info = 1; + // The statically loaded listener configs. repeated StaticListener static_listeners = 2; @@ -102,15 +102,10 @@ message ListenersConfigDump { // configuration information can be used to recreate an Envoy configuration by populating all // clusters as static clusters or by returning them in a CDS response. message ClustersConfigDump { - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - // Describes a statically loaded cluster. message StaticCluster { // The cluster config. - envoy.api.v2.Cluster cluster = 1; + api.v2.Cluster cluster = 1; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 2; @@ -125,12 +120,17 @@ message ClustersConfigDump { string version_info = 1; // The cluster config. - envoy.api.v2.Cluster cluster = 2; + api.v2.Cluster cluster = 2; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 3; } + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + string version_info = 1; + // The statically loaded cluster configs. repeated StaticCluster static_clusters = 2; @@ -153,7 +153,7 @@ message ClustersConfigDump { message RoutesConfigDump { message StaticRouteConfig { // The route config. - envoy.api.v2.RouteConfiguration route_config = 1; + api.v2.RouteConfiguration route_config = 1; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 2; @@ -166,7 +166,7 @@ message RoutesConfigDump { string version_info = 1; // The route config. - envoy.api.v2.RouteConfiguration route_config = 2; + api.v2.RouteConfiguration route_config = 2; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 3; @@ -189,7 +189,7 @@ message ScopedRoutesConfigDump { string name = 1; // The scoped route configurations. - repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configs = 2; + repeated api.v2.ScopedRouteConfiguration scoped_route_configs = 2; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 3; @@ -205,7 +205,7 @@ message ScopedRoutesConfigDump { string version_info = 2; // The scoped route configurations. - repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configs = 3; + repeated api.v2.ScopedRouteConfiguration scoped_route_configs = 3; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 4; @@ -234,7 +234,7 @@ message SecretsConfigDump { // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. 
- envoy.api.v2.auth.Secret secret = 4; + api.v2.auth.Secret secret = 4; } // StaticSecret specifies statically loaded secret in bootstrap. @@ -248,7 +248,7 @@ message SecretsConfigDump { // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. - envoy.api.v2.auth.Secret secret = 3; + api.v2.auth.Secret secret = 3; } // The statically loaded secrets. diff --git a/api/envoy/admin/v2alpha/listeners.proto b/api/envoy/admin/v2alpha/listeners.proto index 87a4e1d9f739..e84f64540857 100644 --- a/api/envoy/admin/v2alpha/listeners.proto +++ b/api/envoy/admin/v2alpha/listeners.proto @@ -24,5 +24,5 @@ message ListenerStatus { // The actual local address that the listener is listening on. If a listener was configured // to listen on port 0, then this address has the port that was allocated by the OS. - envoy.api.v2.core.Address local_address = 2; + api.v2.core.Address local_address = 2; } diff --git a/api/envoy/admin/v2alpha/memory.proto b/api/envoy/admin/v2alpha/memory.proto index d86e44881056..6173b33cd455 100644 --- a/api/envoy/admin/v2alpha/memory.proto +++ b/api/envoy/admin/v2alpha/memory.proto @@ -12,7 +12,6 @@ option java_package = "io.envoyproxy.envoy.admin.v2alpha"; // values extracted from an internal TCMalloc instance. For more information, see the section of the // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). message Memory { - // The number of bytes allocated by the heap for Envoy. This is an alias for // `generic.current_allocated_bytes`. uint64 allocated = 1; diff --git a/api/envoy/admin/v2alpha/metrics.proto b/api/envoy/admin/v2alpha/metrics.proto index 9a91c7477be5..c0b489f4e39b 100644 --- a/api/envoy/admin/v2alpha/metrics.proto +++ b/api/envoy/admin/v2alpha/metrics.proto @@ -12,6 +12,7 @@ option java_package = "io.envoyproxy.envoy.admin.v2alpha"; message SimpleMetric { enum Type { COUNTER = 0; + GAUGE = 1; } diff --git a/api/envoy/admin/v2alpha/mutex_stats.proto b/api/envoy/admin/v2alpha/mutex_stats.proto index 272d7224b0e9..682ff5b49354 100644 --- a/api/envoy/admin/v2alpha/mutex_stats.proto +++ b/api/envoy/admin/v2alpha/mutex_stats.proto @@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.admin.v2alpha"; // correspond to core clock frequency. For more information, see the `CycleClock` // [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). message MutexStats { - // The number of individual mutex contentions which have occurred since startup. uint64 num_contentions = 1; diff --git a/api/envoy/admin/v2alpha/server_info.proto b/api/envoy/admin/v2alpha/server_info.proto index 78cc6fa7020a..b43a6b23abd8 100644 --- a/api/envoy/admin/v2alpha/server_info.proto +++ b/api/envoy/admin/v2alpha/server_info.proto @@ -13,20 +13,23 @@ import "google/protobuf/duration.proto"; // Proto representation of the value returned by /server_info, containing // server version/server status information. message ServerInfo { - // Server version. - string version = 1; - enum State { // Server is live and serving traffic. LIVE = 0; + // Server is draining listeners in response to external health checks failing. DRAINING = 1; + // Server has not yet completed cluster manager initialization. PRE_INITIALIZING = 2; + // Server is running the cluster manager initialization callbacks (e.g., RDS). INITIALIZING = 3; } + // Server version. + string version = 1; + // State of the server. 
State state = 2; @@ -44,6 +47,25 @@ message ServerInfo { } message CommandLineOptions { + enum IpVersion { + v4 = 0; + + v6 = 1; + } + + enum Mode { + // Validate configs and then serve traffic normally. + Serve = 0; + + // Validate configs and exit. + Validate = 1; + + // Completely load and initialize the config, and then exit without running the listener loop. + InitOnly = 2; + } + + reserved 12; + // See :option:`--base-id` for details. uint64 base_id = 1; @@ -65,11 +87,6 @@ message CommandLineOptions { // See :option:`--admin-address-path` for details. string admin_address_path = 6; - enum IpVersion { - v4 = 0; - v6 = 1; - } - // See :option:`--local-address-ip-version` for details. IpVersion local_address_ip_version = 7; @@ -85,8 +102,6 @@ message CommandLineOptions { // See :option:`--log-path` for details. string log_path = 11; - reserved 12; - // See :option:`--service-cluster` for details. string service_cluster = 13; @@ -105,22 +120,12 @@ message CommandLineOptions { // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - // See :option:`--mode` for details. Mode mode = 19; // max_stats and max_obj_name_len are now unused and have no effect. uint64 max_stats = 20 [deprecated = true]; + uint64 max_obj_name_len = 21 [deprecated = true]; // See :option:`--disable-hot-restart` for details. diff --git a/api/envoy/admin/v2alpha/tap.proto b/api/envoy/admin/v2alpha/tap.proto index 789be14b01c3..d7caf609af52 100644 --- a/api/envoy/admin/v2alpha/tap.proto +++ b/api/envoy/admin/v2alpha/tap.proto @@ -1,20 +1,21 @@ syntax = "proto3"; -import "envoy/service/tap/v2alpha/common.proto"; -import "validate/validate.proto"; - package envoy.admin.v2alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; +import "envoy/service/tap/v2alpha/common.proto"; + +import "validate/validate.proto"; + // The /tap admin request body that is used to configure an active tap session. message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string.min_bytes = 1]; + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. - service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message.required = true]; + service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index 87e80b498fca..1710b57f7af2 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ -36,11 +36,11 @@ message TlsParameters { } // Minimum TLS protocol version. By default, it's ``TLSv1_0``. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum.defined_only = true]; + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. 
- TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum.defined_only = true]; + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list // `_ @@ -106,7 +106,7 @@ message TlsParameters { message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string.min_bytes = 1]; + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -167,7 +167,7 @@ message TlsSessionTicketKeys { // * Keep the session ticket keys at least as secure as your TLS certificate private keys // * Rotate session ticket keys at least daily, and preferably hourly // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 [(validate.rules).repeated .min_items = 1]; + repeated core.DataSource keys = 1 [(validate.rules).repeated = {min_items: 1}]; } message CertificateValidationContext { @@ -201,9 +201,9 @@ message CertificateValidationContext { // // .. code-block:: bash // - // $ openssl x509 -in path/to/client.crt -noout -pubkey \ - // | openssl pkey -pubin -outform DER \ - // | openssl dgst -sha256 -binary \ + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary // | openssl enc -base64 // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= // @@ -223,7 +223,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 - [(validate.rules).repeated .items.string = {min_bytes: 44, max_bytes: 44}]; + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -252,7 +252,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated .items.string = {min_bytes: 64, max_bytes: 95}]; + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative Names. If specified, Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified values. @@ -283,6 +283,18 @@ message CertificateValidationContext { // TLS context shared by both client and server TLS contexts. message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + // TLS protocol versions, cipher suites etc. TlsParameters tls_params = 1; @@ -296,17 +308,7 @@ message CommonTlsContext { // Configs for fetching TLS certificates via SDS API. 
repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated .max_items = 1]; - - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message.required = true]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message.required = true]; - }; + [(validate.rules).repeated = {max_items: 1}]; oneof validation_context_type { // How to validate peer certificates. @@ -336,8 +338,6 @@ message CommonTlsContext { // // There is no default for this parameter. If empty, Envoy will not expose ALPN. repeated string alpn_protocols = 4; - - reserved 5; } message UpstreamTlsContext { @@ -345,7 +345,7 @@ message UpstreamTlsContext { CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string.max_bytes = 255]; + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; // If true, server-initiated TLS renegotiation will be allowed. // @@ -389,6 +389,7 @@ message SdsSecretConfig { // When only name is specified, then secret will be loaded from static // resources. string name = 1; + core.ConfigSource sds_config = 2; } @@ -396,9 +397,12 @@ message SdsSecretConfig { message Secret { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. string name = 1; + oneof type { TlsCertificate tls_certificate = 2; + TlsSessionTicketKeys session_ticket_keys = 3; + CertificateValidationContext validation_context = 4; } } diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index 85e9a3827c3c..4729ef832afa 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -5,19 +5,18 @@ package envoy.api.v2; option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; -import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/cluster/circuit_breaker.proto"; +import "envoy/api/v2/cluster/filter.proto"; +import "envoy/api/v2/cluster/outlier_detection.proto"; +import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/core/health_check.proto"; import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/cluster/circuit_breaker.proto"; -import "envoy/api/v2/cluster/filter.proto"; -import "envoy/api/v2/cluster/outlier_detection.proto"; +import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/eds.proto"; import "envoy/type/percent.proto"; @@ -29,6 +28,8 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: Clusters] + // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { @@ -45,24 +46,9 @@ service ClusterDiscoveryService { } } -// [#protodoc-title: Clusters] - // Configuration for a single upstream cluster. // [#comment:next free field: 42] message Cluster { - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. 
- // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // An optional alternative to the cluster name to be used while emitting stats. - // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - // confused with :ref:`Router Filter Header - // `. - string alt_stat_name = 28; - // Refer to :ref:`service discovery type ` // for an explanation on each type. enum DiscoveryType { @@ -90,45 +76,6 @@ message Cluster { ORIGINAL_DST = 4; } - // Extended cluster type. - message CustomClusterType { - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - google.protobuf.Any typed_config = 2; - } - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - // Configuration for the source of EDS updates for this Cluster. - core.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; - } - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration.gt = {}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - // Refer to :ref:`load balancer type ` architecture // overview section for information on each type. enum LbPolicy { @@ -160,7 +107,7 @@ message Cluster { // // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. // - ORIGINAL_DST_LB = 4 [deprecated = true]; + ORIGINAL_DST_LB = 4; // Refer to the :ref:`Maglev load balancing policy` // for an explanation. @@ -178,110 +125,6 @@ message Cluster { // configuring this.] LOAD_BALANCING_POLICY_CONFIG = 7; } - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. - // - repeated core.Address hosts = 7; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes :ref:`hosts` field. - // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` - // once load_assignment is implemented.] - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // Setting this overrides :ref:`hosts` values. 
- // - ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; - - // Optional :ref:`circuit breaking ` for the cluster. - cluster.CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. If no TLS - // configuration is specified, TLS will not be used for new connections. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - auth.UpstreamTlsContext tls_context = 11; - - reserved 12; - - // Additional options when handling HTTP requests. These options will be applicable to both - // HTTP1 and HTTP2 requests. - core.HttpProtocolOptions common_http_protocol_options = 29; - - // Additional options when handling HTTP1 requests. - core.Http1ProtocolOptions http_protocol_options = 13; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - core.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map extension_protocol_options = 35; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map typed_extension_protocol_options = 36; - - reserved 15; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. If this setting is not specified, the value defaults to 5000ms. For - // cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration.gt = {}]; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; // When V4_ONLY is selected, the DNS resolver will only perform a lookup for // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will @@ -295,56 +138,46 @@ message Cluster { // ignored. 
enum DnsLookupFamily { AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; } - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true]; + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - repeated core.Address dns_resolvers = 18; + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - cluster.OutlierDetection outlier_detection = 19; + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration.gt = {}]; + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.BindConfig upstream_bind_config = 21; + // Only valid when discovery type is EDS. + message EdsClusterConfig { + // Configuration for the source of EDS updates for this Cluster. + core.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. message LbSubsetConfig { - // If NO_FALLBACK is selected, a result // equivalent to no healthy hosts is reported. 
If ANY_ENDPOINT is selected, // any cluster endpoint may be returned (subject to policy, health checks, @@ -352,49 +185,55 @@ message Cluster { // endpoints matching the values from the default_subset field. enum LbSubsetFallbackPolicy { NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; } - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - // Specifications for subsets. message LbSubsetSelector { - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum.defined_only = true]; - // Allows to override top level fallback policy per selector. enum LbSubsetSelectorFallbackPolicy { // If NOT_DEFINED top level config fallback policy is used instead. NOT_DEFINED = 0; + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. NO_FALLBACK = 1; + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned // (subject to policy, health checks, etc). ANY_ENDPOINT = 2; + // If DEFAULT_SUBSET is selected, load balancing is performed over the // endpoints matching the values from the default_subset field. DEFAULT_SUBSET = 3; } + + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; } + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + // For each entry, LbEndpoint.Metadata's // *envoy.lb* namespace is traversed and a subset is created for each unique // combination of key and value. For example: @@ -443,45 +282,43 @@ message Cluster { bool list_as_any = 7; } - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - // Specific configuration for the LeastRequest load balancing policy. message LeastRequestLbConfig { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. 
- google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32.gte = 2]; + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; } // Specific configuration for the :ref:`RingHash` // load balancing policy. message RingHashLbConfig { - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608]; - - reserved 2; - // The hash function used to hash hosts onto the ketama ring. enum HashFunction { // Use `xxHash `_, this is the default hash function. XX_HASH = 0; + // Use `MurmurHash2 `_, this is compatible with // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled // on Linux and not macOS. MURMUR_HASH_2 = 1; } + reserved 2; + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; + // The hash function used to hash hosts onto the ketama ring. The value defaults to // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true]; + HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered // to further constrain resource use. See also // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608]; + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; } // Specific configuration for the @@ -500,31 +337,8 @@ message Cluster { bool use_http_header = 1; } - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - // Common configuration for all load balancer implementations. message CommonLbConfig { - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - envoy.type.Percent healthy_panic_threshold = 1; // Configuration for :ref:`zone aware routing // `. message ZoneAwareLbConfig { @@ -532,7 +346,8 @@ message Cluster { // if zone aware routing is configured. If not specified, the default is 100%. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. 
- envoy.type.Percent routing_enabled = 1; + type.Percent routing_enabled = 1; + // Configures minimum upstream cluster size required for zone aware routing // If upstream cluster size is less than specified, zone aware routing is not performed // even if zone aware routing is configured. If not specified, the default is 6. @@ -546,14 +361,26 @@ message Cluster { // failing service. bool fail_traffic_on_panic = 3; } + // Configuration for :ref:`locality weighted load balancing // ` message LocalityWeightedLbConfig { } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. + type.Percent healthy_panic_threshold = 1; + oneof locality_config_specifier { ZoneAwareLbConfig zone_aware_lb_config = 2; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; } + // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when // the first update happens. This is useful for big clusters, with potentially noisy deploys @@ -596,6 +423,204 @@ message Cluster { bool close_connections_on_host_set_change = 6; } + reserved 12, 15; + + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // If the service discovery type is + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS`, + // then hosts is required. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`load_assignment` field instead. + // + repeated core.Address hosts = 7; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes :ref:`hosts` field. + // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` + // once load_assignment is implemented.] + // + // .. 
attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // Setting this overrides :ref:`hosts` values. + // + ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + cluster.CircuitBreakers circuit_breakers = 10; + + // The TLS configuration for connections to the upstream cluster. If no TLS + // configuration is specified, TLS will not be used for new connections. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + auth.UpstreamTlsContext tls_context = 11; + + // Additional options when handling HTTP requests. These options will be applicable to both + // HTTP1 and HTTP2 requests. + core.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map extension_protocol_options = 35; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map typed_extension_protocol_options = 36; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. If this setting is not specified, the value defaults to 5000ms. For + // cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration = {gt {}}]; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // The DNS IP address resolution policy. 
If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + repeated core.Address dns_resolvers = 18; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + cluster.OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + // Common configuration for all load balancer implementations. CommonLbConfig common_lb_config = 27; @@ -609,20 +634,11 @@ message Cluster { // the Router filter, the filter name should be specified as *envoy.router*. core.Metadata metadata = 25; - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - // Determines how Envoy selects the protocol used to speak to upstream hosts. 
ClusterProtocolSelection protocol_selection = 26; // Optional options for upstream connections. - envoy.api.v2.UpstreamConnectionOptions upstream_connection_options = 30; + UpstreamConnectionOptions upstream_connection_options = 30; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. @@ -683,11 +699,14 @@ message LoadBalancingPolicy { message Policy { // Required. The name of the LB policy. string name = 1; + // Optional config for the LB policy. // No more than one of these two fields may be populated. google.protobuf.Struct config = 2; + google.protobuf.Any typed_config = 3; } + // Each client will iterate over the list in order and stop at the first policy that it // supports. This provides a mechanism for starting to use new LB policies that are not yet // supported by all clients. diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index e36677c89b64..e5eb3bb07289 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -5,8 +5,6 @@ package envoy.api.v2.cluster; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; import "envoy/api/v2/core/base.proto"; @@ -17,7 +15,6 @@ import "google/protobuf/wrappers.proto"; // :ref:`Circuit breaking` settings can be // specified individually for each defined priority. message CircuitBreakers { - // A Thresholds defines CircuitBreaker settings for a // :ref:`RoutingPriority`. message Thresholds { diff --git a/api/envoy/api/v2/cluster/filter.proto b/api/envoy/api/v2/cluster/filter.proto index 94c683913953..6ecb536f1015 100644 --- a/api/envoy/api/v2/cluster/filter.proto +++ b/api/envoy/api/v2/cluster/filter.proto @@ -5,8 +5,6 @@ package envoy.api.v2.cluster; option java_outer_classname = "FilterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; import "google/protobuf/any.proto"; @@ -18,7 +16,7 @@ import "validate/validate.proto"; message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto index d457c8165f49..5b247d898b02 100644 --- a/api/envoy/api/v2/cluster/outlier_detection.proto +++ b/api/envoy/api/v2/cluster/outlier_detection.proto @@ -5,8 +5,6 @@ package envoy.api.v2.cluster; option java_outer_classname = "OutlierDetectionProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -26,26 +24,26 @@ message OutlierDetection { // The time interval between ejection analysis sweeps. 
This can result in // both new ejections as well as hosts being returned to service. Defaults // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. The real time is equal to the // base time multiplied by the number of times the host has been ejected. // Defaults to 30000ms or 30s. - google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; // The maximum % of an upstream cluster that can be ejected due to outlier // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through consecutive 5xx. This setting can be used to disable // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics. This setting can be used to // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; // The number of hosts in a cluster that must have enough request volume to // detect success rate outliers. If the number of hosts is less than this @@ -77,7 +75,7 @@ message OutlierDetection { // is detected through consecutive gateway failures. This setting can be // used to disable ejection or to ramp it up slowly. Defaults to 0. google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // Determines whether to distinguish local origin failures from external errors. If set to true // the following configuration parameters are taken into account: @@ -101,7 +99,7 @@ message OutlierDetection { // :ref:`split_external_local_origin_errors` // is set to true. google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics for locally originated errors. @@ -110,12 +108,13 @@ message OutlierDetection { // :ref:`split_external_local_origin_errors` // is set to true. google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The failure percentage to use when determining failure percentage-based outlier detection. If // the failure percentage of a given host is greater than or equal to this value, it will be // ejected. Defaults to 85. 
- google.protobuf.UInt32Value failure_percentage_threshold = 16 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value failure_percentage_threshold = 16 + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status is detected through // failure percentage statistics. This setting can be used to disable ejection or to ramp it up @@ -123,13 +122,14 @@ message OutlierDetection { // // [#next-major-version: setting this without setting failure_percentage_threshold should be // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_failure_percentage = 17 + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status is detected through // local-origin failure percentage statistics. This setting can be used to disable ejection or to // ramp it up slowly. Defaults to 0. google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. // If the total number of hosts in the cluster is less than this value, failure percentage-based diff --git a/api/envoy/api/v2/core/address.proto b/api/envoy/api/v2/core/address.proto index 362395577fc9..89fd0adb1eb6 100644 --- a/api/envoy/api/v2/core/address.proto +++ b/api/envoy/api/v2/core/address.proto @@ -19,16 +19,19 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. - string path = 1 [(validate.rules).string.min_bytes = 1]; + string path = 1 [(validate.rules).string = {min_bytes: 1}]; } message SocketAddress { enum Protocol { TCP = 0; + // [#not-implemented-hide:] UDP = 1; } - Protocol protocol = 1 [(validate.rules).enum.defined_only = true]; + + Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; + // The address for this socket. :ref:`Listeners ` will bind // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: @@ -40,15 +43,19 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string.min_bytes = 1]; + string address = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof port_specifier { option (validate.required) = true; - uint32 port_value = 3 [(validate.rules).uint32.lte = 65535]; + + uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; + // This is only valid if :ref:`resolver_name // ` is specified below and the // named resolver is capable of named port resolution. string named_port = 4; } + // The name of the custom resolver. This must have been registered with Envoy. If // this is empty, a context dependent default applies. If the address is a concrete // IP address, no resolution will occur. If address is a hostname this @@ -68,10 +75,12 @@ message TcpKeepalive { // the connection is dead. Default is to use the OS level configuration (unless // overridden, Linux defaults to 9.) 
google.protobuf.UInt32Value keepalive_probes = 1; + // The number of seconds a connection needs to be idle before keep-alive probes // start being sent. Default is to use the OS level configuration (unless // overridden, Linux defaults to 7200s (ie 2 hours.) google.protobuf.UInt32Value keepalive_time = 2; + // The number of seconds between keep-alive probes. Default is to use the OS // level configuration (unless overridden, Linux defaults to 75s.) google.protobuf.UInt32Value keepalive_interval = 3; @@ -79,7 +88,7 @@ message TcpKeepalive { message BindConfig { // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message.required = true]; + SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; // Whether to set the *IP_FREEBIND* option when creating the socket. When this // flag is set to true, allows the :ref:`source_address @@ -103,6 +112,7 @@ message Address { option (validate.required) = true; SocketAddress socket_address = 1; + Pipe pipe = 2; } } @@ -111,7 +121,8 @@ message Address { // the subnet mask for a `CIDR `_ range. message CidrRange { // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + // Length of prefix, e.g. 0, 32. - google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32.lte = 128]; + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; } diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index 2a778f19afb1..3388d31f4d7c 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.core"; import "envoy/api/v2/core/http_uri.proto"; +import "envoy/type/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; @@ -14,10 +15,56 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -import "envoy/type/percent.proto"; - // [#protodoc-title: Common types] +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + METHOD_UNSPECIFIED = 0; + + GET = 1; + + HEAD = 2; + + POST = 3; + + PUT = 4; + + DELETE = 5; + + CONNECT = 6; + + OPTIONS = 7; + + TRACE = 8; + + PATCH = 9; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. + INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} + // Identifies location of where either Envoy runs or where upstream hosts run. message Locality { // Region this :ref:`zone ` belongs to. @@ -110,52 +157,26 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. 
- string runtime_key = 3 [(validate.rules).string.min_bytes = 1]; -} - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. -enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; + string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } // Header name/value pair. message HeaderValue { // Header name. - string key = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 16384}]; + string key = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 16384}]; // Header value. // // The same :ref:`format specifier ` as used for // :ref:`HTTP access logging ` applies here, however // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [(validate.rules).string.max_bytes = 16384]; + string value = 2 [(validate.rules).string = {max_bytes: 16384}]; } // Header name/value pair plus option to control append behavior. message HeaderValueOption { // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message.required = true]; + HeaderValue header = 1 [(validate.rules).message = {required: true}]; // Should the value be appended? If true (default), the value is appended to // existing values. @@ -173,23 +194,23 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string.min_bytes = 1]; + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes.min_len = 1]; + bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string.min_bytes = 1]; + string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; } } // The message specifies how to fetch data from remote and how to verify it. message RemoteDataSource { // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message.required = true]; + HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string.min_bytes = 1]; + string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; } // Async data source which support async data fetch. @@ -212,7 +233,7 @@ message AsyncDataSource { message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. @@ -226,39 +247,47 @@ message TransportSocket { // Generic socket option message. This would be used to set socket options that // might not exist in upstream kernels or precompiled Envoy binaries. 
message SocketOption { + enum SocketState { + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + // An optional name to give this socket option for debugging, etc. // Uniqueness is not required and no special meaning is assumed. string description = 1; + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP int64 level = 2; + // The numeric name as passed to setsockopt int64 name = 3; + oneof value { option (validate.required) = true; // Because many sockopts take an int value. int64 int_value = 4; + // Otherwise it's a byte buffer. bytes buf_value = 5; } - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } + // The state in which the option will be applied. When used in BindConfig // STATE_PREBIND is currently the only valid value. - SocketState state = 6 [(validate.rules).enum.defined_only = true]; + SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } // Runtime derived FractionalPercent with defaults for when the numerator or denominator is not // specified via a runtime key. message RuntimeFractionalPercent { // Default value if the runtime value's for the numerator/denominator keys are not available. - envoy.type.FractionalPercent default_value = 1 [(validate.rules).message.required = true]; + type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; // Runtime key for a YAML representation of a FractionalPercent. string runtime_key = 2; @@ -271,15 +300,3 @@ message ControlPlane { // the Envoy is connected to. string identifier = 1; } - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index d86a2104f7d0..f0921d3b47a7 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -22,13 +22,16 @@ message ApiConfigSource { enum ApiType { // Ideally this would be 'reserved 0' but one can't reserve the default // value. Instead we throw an exception if this is ever used. - UNSUPPORTED_REST_LEGACY = 0 [deprecated = true]; + UNSUPPORTED_REST_LEGACY = 0; + // REST-JSON v2 API. The `canonical JSON encoding // `_ for // the v2 protos is used. REST = 1; + // gRPC v2 API. GRPC = 2; + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. @@ -37,7 +40,9 @@ message ApiConfigSource { // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. 
DELTA_GRPC = 3; } - ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; + + ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; + // Cluster names should be used only with REST. If > 1 // cluster is defined, clusters will be cycled through if any kind of failure // occurs. @@ -56,7 +61,7 @@ message ApiConfigSource { google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration.gt.seconds = 0]; + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. @@ -80,7 +85,7 @@ message RateLimitSettings { // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double.gt = 0.0]; + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; } // Configuration for :ref:`listeners `, :ref:`clusters @@ -92,6 +97,7 @@ message RateLimitSettings { message ConfigSource { oneof config_source_specifier { option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. // // .. note:: @@ -104,8 +110,10 @@ message ConfigSource { // are atomic. The same method of swapping files as is demonstrated in the // :ref:`runtime documentation ` can be used here also. string path = 1; + // API configuration source. ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. AggregatedConfigSource ads = 3; diff --git a/api/envoy/api/v2/core/grpc_service.proto b/api/envoy/api/v2/core/grpc_service.proto index 9b0ba80b93cc..acd86c36da02 100644 --- a/api/envoy/api/v2/core/grpc_service.proto +++ b/api/envoy/api/v2/core/grpc_service.proto @@ -10,8 +10,8 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; import "validate/validate.proto"; @@ -24,16 +24,11 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`tls_context // `. - string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#proto-status: draft] message GoogleGrpc { - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string.min_bytes = 1]; - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. message SslCredentials { // PEM encoded server root certificates. 
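For illustration, a minimal ConfigSource sketch in YAML form, using the fields that appear in the hunk above; the cluster name and the timing values are placeholders, and the cluster_names field name is inferred from the "Cluster names should be used only with REST" comment rather than shown as a field declaration here:

  config_source:
    api_config_source:
      api_type: REST
      cluster_names: [xds_cluster]   # placeholder management-server cluster
      refresh_delay: 5s
      request_timeout: 1s

The same config_source_specifier oneof could instead carry a path entry (filesystem watch with atomic file moves, as described above) or ads: {} to reuse the bootstrap ADS source.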
@@ -56,6 +51,7 @@ message GrpcService { message ChannelCredentials { oneof credential_specifier { option (validate.required) = true; + SslCredentials ssl_credentials = 1; // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 @@ -65,21 +61,22 @@ message GrpcService { } } - ChannelCredentials channel_credentials = 2; - message CallCredentials { message ServiceAccountJWTAccessCredentials { string json_key = 1; + uint64 token_lifetime_seconds = 2; } message GoogleIAMCredentials { string authorization_token = 1; + string authority_selector = 2; } message MetadataCredentialsFromPlugin { string name = 1; + oneof config_type { google.protobuf.Struct config = 2; @@ -117,6 +114,13 @@ message GrpcService { } } + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + ChannelCredentials channel_credentials = 2; + // A set of call credentials that can be composed with `channel credentials // `_. repeated CallCredentials call_credentials = 3; @@ -130,7 +134,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -142,6 +146,8 @@ message GrpcService { google.protobuf.Struct config = 6; } + reserved 4; + oneof target_specifier { option (validate.required) = true; @@ -160,8 +166,6 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - reserved 4; - // Additional metadata to include in streams initiated to the GrpcService. // This can be used for scenarios in which additional ad hoc authorization // headers (e.g. `x-foo-bar: baz-key`) are to be injected. diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto index 5bc496fdb68d..078a5a4beef2 100644 --- a/api/envoy/api/v2/core/health_check.proto +++ b/api/envoy/api/v2/core/health_check.proto @@ -21,60 +21,40 @@ import "validate/validate.proto"; // * If health checking is configured for a cluster, additional statistics are emitted. They are // documented :ref:`here `. -message HealthCheck { - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. 
+ UNKNOWN = 0; - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; + // Healthy. + HEALTHY = 1; - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4; + // Unhealthy. + UNHEALTHY = 2; - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5; + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. + TIMEOUT = 4; - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; + // Degraded. + DEGRADED = 5; +} +message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { oneof payload { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string.min_bytes = 1]; + string text = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -90,7 +70,7 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. - string path = 2 [(validate.rules).string.min_bytes = 1]; + string path = 2 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -107,8 +87,8 @@ message HealthCheck { // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers // `. - repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated .max_items = 1000]; + repeated HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request that is sent to the // health checked cluster. @@ -120,7 +100,7 @@ message HealthCheck { // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. - repeated envoy.type.Int64Range expected_statuses = 9; + repeated type.Int64Range expected_statuses = 9; } message TcpHealthCheck { @@ -162,7 +142,7 @@ message HealthCheck { // Custom health check. message CustomHealthCheck { // The registered name of the custom health checker. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. 
See :api:`envoy/config/health_checker` for reference. @@ -173,6 +153,54 @@ message HealthCheck { } } + reserved 10; + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add interval_ms * + // interval_jitter_percent / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. + google.protobuf.UInt32Value unhealthy_threshold = 4; + + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + oneof health_checker { option (validate.required) = true; @@ -189,9 +217,6 @@ message HealthCheck { CustomHealthCheck custom_health_check = 13; } - reserved 10; - reserved "redis_health_check"; - // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to // date, without sending a potentially large amount of active health checking traffic for no @@ -200,14 +225,14 @@ message HealthCheck { // any other. // // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. 
For subsequent health checks @@ -215,14 +240,14 @@ message HealthCheck { // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. @@ -233,29 +258,3 @@ message HealthCheck { // The default value is false. bool always_log_health_check_failures = 19; } - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. - DEGRADED = 5; -} diff --git a/api/envoy/api/v2/core/http_uri.proto b/api/envoy/api/v2/core/http_uri.proto index debaa4155679..7e4b4dba43ce 100644 --- a/api/envoy/api/v2/core/http_uri.proto +++ b/api/envoy/api/v2/core/http_uri.proto @@ -22,7 +22,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string.min_bytes = 1]; + string uri = 1 [(validate.rules).string = {min_bytes: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -30,6 +30,7 @@ message HttpUri { // `_. oneof http_upstream_type { option (validate.required) = true; + // A cluster is created in the Envoy "cluster_manager" config // section. This field specifies the cluster name. // @@ -39,10 +40,12 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string.min_bytes = 1]; + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 - [(validate.rules).duration.gte = {}, (validate.rules).duration.required = true]; + google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; } diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index 68103cb00e1b..c45bb7adf7db 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -56,7 +56,7 @@ message Http2ProtocolOptions { // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. 
google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {gte: 1, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; // `Initial stream-level flow-control window // `_ size. Valid values range from 65535 @@ -70,12 +70,12 @@ message Http2ProtocolOptions { // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Similar to *initial_stream_window_size*, but for connection-level flow-control // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Allows proxying Websocket and other upgrades over H2 connect. bool allow_connect = 5; diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index 01982fbf6f95..15518902977a 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; import "envoy/api/v2/discovery.proto"; @@ -13,10 +12,10 @@ import "envoy/api/v2/endpoint/endpoint.proto"; import "envoy/type/percent.proto"; import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -import "google/protobuf/wrappers.proto"; -import "google/protobuf/duration.proto"; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` @@ -48,29 +47,18 @@ service EndpointDiscoveryService { // load_balancing_weight of its locality. First, a locality will be selected, // then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; - - // List of endpoints to load balance to. - repeated endpoint.LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - map named_endpoints = 5; - // Load balancing policy settings. message Policy { - reserved 1; - message DropOverload { // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string.min_bytes = 1]; + string category = 1 [(validate.rules).string = {min_bytes: 1}]; // Percentage of traffic that should be dropped for the category. - envoy.type.FractionalPercent drop_percentage = 2; + type.FractionalPercent drop_percentage = 2; } + + reserved 1; + // Action to trim the overall incoming traffic to protect the upstream // hosts. This action allows protection in case the hosts are unable to // recover from an outage, or unable to autoscale or unable to handle @@ -106,13 +94,13 @@ message ClusterLoadAssignment { // // Read more at :ref:`priority levels ` and // :ref:`localities `. 
- google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; // The max time until which the endpoints from this assignment can be used. // If no new assignments are received before this time expires the endpoints // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; // The flag to disable overprovisioning. If it is set to true, // :ref:`overprovisioning factor @@ -126,6 +114,18 @@ message ClusterLoadAssignment { bool disable_overprovisioning = 5; } + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // List of endpoints to load balance to. + repeated endpoint.LocalityLbEndpoints endpoints = 2; + + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + map named_endpoints = 5; + // Load balancing policy settings. Policy policy = 4; } diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto index 7d614a26bb76..46875a173e83 100644 --- a/api/envoy/api/v2/endpoint/endpoint.proto +++ b/api/envoy/api/v2/endpoint/endpoint.proto @@ -18,6 +18,17 @@ import "validate/validate.proto"; // Upstream host identifier. message Endpoint { + // The optional health check configuration. + message HealthCheckConfig { + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + } + // The upstream host address. // // .. attention:: @@ -29,17 +40,6 @@ message Endpoint { // and will be resolved via DNS. core.Address address = 1; - // The optional health check configuration. - message HealthCheckConfig { - // Optional alternative health check port value. - // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. - uint32 port_value = 1 [(validate.rules).uint32.lte = 65535]; - } - // The optional health check configuration is used as configuration for the // health checker to contact the health checked host. // @@ -55,6 +55,7 @@ message LbEndpoint { // Upstream host identifier or a named reference. 
oneof host_identifier { Endpoint endpoint = 1; + string endpoint_name = 5; } diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto index a26d64cab5b4..1b515d5d6592 100644 --- a/api/envoy/api/v2/lds.proto +++ b/api/envoy/api/v2/lds.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; import "envoy/api/v2/core/address.proto"; @@ -44,6 +43,34 @@ service ListenerDiscoveryService { // [#comment:next free field: 19] message Listener { + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + reserved 14; + // The unique name by which this listener is known. If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically // updated or removed via :ref:`LDS ` a unique name must be provided. @@ -52,7 +79,7 @@ message Listener { // The address that the listener should listen on. In general, the address must be unique, though // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on // Linux as the actual port will be allocated by the OS. - core.Address address = 2 [(validate.rules).message.required = true]; + core.Address address = 2 [(validate.rules).message = {required: true}]; // A list of filter chains to consider for this listener. The // :ref:`FilterChain ` with the most specific @@ -87,34 +114,9 @@ message Listener { // Listener metadata. core.Metadata metadata = 6; - // [#not-implemented-hide:] - message DeprecatedV1 { - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated in v2, all Listeners will bind to their port. An - // additional filter chain must be created for every original destination - // port this listener may redirect to in v2, with the original port - // specified in the FilterChainMatch destination_port field. - // - // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] 
- google.protobuf.BoolValue bind_to_port = 1; - } - // [#not-implemented-hide:] DeprecatedV1 deprecated_v1 = 7; - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - // The type of draining to perform at a listener-wide level. DrainType drain_type = 8; @@ -188,8 +190,6 @@ message Listener { // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - reserved 14; - // Specifies the intended direction of the traffic relative to the local Envoy. core.TrafficDirection traffic_direction = 16; diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index f6dcecc70805..a5afa2ff044f 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -5,11 +5,9 @@ package envoy.api.v2.listener; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; -import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; @@ -22,9 +20,11 @@ import "validate/validate.proto"; // Listener :ref:`configuration overview ` message Filter { + reserved 3; + // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. @@ -33,8 +33,6 @@ message Filter { google.protobuf.Any typed_config = 4; } - - reserved 3; } // Specifies the match criteria for selecting a specific filter chain for a @@ -66,9 +64,22 @@ message Filter { // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] message FilterChainMatch { + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + LOCAL = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + // Optional destination port to consider when use_original_dst is set on the // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {gte: 1, lte: 65535}]; + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; // If non-empty, an IP address and prefix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. @@ -82,17 +93,8 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - // Match a connection originating from the same host. - LOCAL = 1; - // Match a connection originating from a different host. 
- EXTERNAL = 2; - } - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum.defined_only = true]; + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; // The criteria is satisfied if the source IP address of the downstream // connection is contained in at least one of the specified subnets. If the @@ -103,7 +105,8 @@ message FilterChainMatch { // The criteria is satisfied if the source port of the downstream connection // is contained in at least one of the specified ports. If the parameter is // not specified, the source port is ignored. - repeated uint32 source_ports = 7 [(validate.rules).repeated .items.uint32 = {gte: 1, lte: 65535}]; + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining // a filter chain match. Those values will be compared against the server names of a new @@ -151,9 +154,6 @@ message FilterChainMatch { // and matching on values other than ``h2`` is going to lead to a lot of false negatives, // unless all connecting clients are known to use ALPN. repeated string application_protocols = 10; - - reserved 1; - reserved "sni_domains"; } // A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and @@ -194,7 +194,7 @@ message FilterChain { message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. 
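For illustration, a FilterChainMatch sketch in YAML form built from the fields discussed in the hunk above; the port, address range, and server name are placeholders, and the prefix_ranges and server_names field names are inferred from the surrounding comments rather than shown as declarations in this hunk:

  filter_chain_match:
    destination_port: 443
    prefix_ranges:
      - address_prefix: 10.0.0.0
        prefix_len: 8
    server_names: [example.com]      # SNI from the TLS ClientHello
    source_type: EXTERNAL
    application_protocols: [h2]

A chain is selected only if every populated criterion matches; unset criteria are ignored.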
diff --git a/api/envoy/api/v2/listener/quic_config.proto b/api/envoy/api/v2/listener/quic_config.proto index 95ffc3cdf319..e4e3fee3c0d4 100644 --- a/api/envoy/api/v2/listener/quic_config.proto +++ b/api/envoy/api/v2/listener/quic_config.proto @@ -2,11 +2,9 @@ syntax = "proto3"; package envoy.api.v2.listener; -option java_outer_classname = "ListenerProto"; +option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto index 28d8233f5ff0..a2fa43899166 100644 --- a/api/envoy/api/v2/listener/udp_listener_config.proto +++ b/api/envoy/api/v2/listener/udp_listener_config.proto @@ -5,11 +5,9 @@ package envoy.api.v2.listener; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; -import "google/protobuf/struct.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; // [#protodoc-title: Udp Listener Config] // Listener :ref:`configuration overview ` diff --git a/api/envoy/api/v2/ratelimit/ratelimit.proto b/api/envoy/api/v2/ratelimit/ratelimit.proto index 6f4cd6258283..af910e3938ba 100644 --- a/api/envoy/api/v2/ratelimit/ratelimit.proto +++ b/api/envoy/api/v2/ratelimit/ratelimit.proto @@ -54,12 +54,12 @@ import "validate/validate.proto"; message RateLimitDescriptor { message Entry { // Descriptor key. - string key = 1 [(validate.rules).string.min_bytes = 1]; + string key = 1 [(validate.rules).string = {min_bytes: 1}]; // Descriptor value. - string value = 2 [(validate.rules).string.min_bytes = 1]; + string value = 2 [(validate.rules).string = {min_bytes: 1}]; } // Descriptor entries. - repeated Entry entries = 1 [(validate.rules).repeated .min_items = 1]; + repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index 9fabaf28af80..120c4bd4e32f 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; import "envoy/api/v2/core/base.proto"; @@ -90,7 +89,7 @@ message RouteConfiguration { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // that the connection manager encodes. @@ -103,7 +102,7 @@ message RouteConfiguration { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // routed by the HTTP connection manager. 
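For illustration, a RouteConfiguration fragment in YAML form exercising the header-mutation fields from the hunk above, with HeaderValueOption shaped as defined in core/base.proto earlier in this patch; the header names and values are placeholders:

  response_headers_to_add:
    - header:
        key: x-served-by
        value: front-envoy
      append: false
  request_headers_to_add:
    - header:
        key: x-downstream-tier
        value: edge

Both to_add lists are capped at 1000 entries by the max_items validate rule shown above.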
@@ -128,5 +127,5 @@ message RouteConfiguration { // [#not-implemented-hide:] message Vhds { // Configuration source specifier for VHDS. - envoy.api.v2.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index ff7fa88a81de..aafcfcb0cfac 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -5,7 +5,6 @@ package envoy.api.v2.route; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.route"; -option java_generic_services = true; import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/regex.proto"; @@ -31,9 +30,24 @@ import "validate/validate.proto"; // upstream cluster to route to or whether to perform a redirect. // [#comment:next free field: 17] message VirtualHost { + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9; + // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -51,25 +65,12 @@ message VirtualHost { // The longest wildcards match first. // Only a single virtual host in the entire route configuration can match on ``*``. A domain // must be unique across all virtual hosts or the config will fail to load. - repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; + repeated string domains = 2 [(validate.rules).repeated = {min_items: 1}]; // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. repeated Route routes = 3; - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - // Specifies the type of TLS enforcement the virtual host expects. If this option is not // specified, there is no TLS requirement for the virtual host. TlsRequirementType require_tls = 4; @@ -89,7 +90,7 @@ message VirtualHost { // details on header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. @@ -102,7 +103,7 @@ message VirtualHost { // details on header value syntax, see the documentation on :ref:`custom request headers // `. 
repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. @@ -111,8 +112,6 @@ message VirtualHost { // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; - reserved 9; - // The per_filter_config field can be used to provide virtual host-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -157,11 +156,13 @@ message VirtualHost { // `. // [#comment:next free field: 15] message Route { + reserved 6; + // Name for the route. string name = 14; // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; oneof action { option (validate.required) = true; @@ -186,8 +187,6 @@ message Route { // Decorator for the matched route. Decorator decorator = 5; - reserved 6; - // The per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -209,7 +208,7 @@ message Route { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // matching this route. @@ -222,7 +221,7 @@ message Route { // details on header value syntax, see the documentation on // :ref:`custom request headers `. repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. @@ -242,9 +241,11 @@ message Route { // [#comment:next free field: 11] message WeightedCluster { message ClusterWeight { + reserved 7; + // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -267,7 +268,7 @@ message WeightedCluster { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. @@ -281,14 +282,12 @@ message WeightedCluster { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. 
repeated string response_headers_to_remove = 6; - reserved 7; - // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -305,11 +304,11 @@ message WeightedCluster { } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; // Specifies the total weight across all clusters. The sum of all cluster weights must equal this // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; // Specifies the runtime key prefix that should be used to construct the // runtime keys associated with each cluster. When the *runtime_key_prefix* is @@ -323,6 +322,11 @@ message WeightedCluster { } message RouteMatch { + message GrpcRouteMatchOptions { + } + + reserved 5; + oneof path_specifier { option (validate.required) = true; @@ -350,7 +354,7 @@ message RouteMatch { // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. - string regex = 3 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex = 3 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // If specified, the route is a regular expression rule meaning that the // regex must match the *:path* header once the query string is removed. The entire path @@ -364,15 +368,13 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] - type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message.required = true]; + type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; } // Indicates that prefix/path matching should be case insensitive. The default // is true. google.protobuf.BoolValue case_sensitive = 4; - reserved 5; - // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the @@ -405,9 +407,6 @@ message RouteMatch { // query string for a match to occur. repeated QueryParameterMatcher query_parameters = 7; - message GrpcRouteMatchOptions { - } - // If specified, only gRPC requests will be matched. The router will check // that the content-type header has a application/grpc or one of the various // application/grpc+ values. @@ -432,7 +431,7 @@ message CorsPolicy { // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for // use with untrusted input in all cases. repeated string allow_origin_regex = 8 - [(validate.rules).repeated .items.string.max_bytes = 1024, deprecated = true]; + [(validate.rules).repeated = {items {string {max_bytes: 1024}}}, deprecated = true]; // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. 
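The recurring churn in these route.proto hunks, and in the rest of the patch, is the protoc-gen-validate (PGV) annotation rewrite: rules spelled as dotted option paths are re-emitted as aggregate protobuf text-format values. A minimal sketch of the two spellings on a made-up message (the `example` package and `Widget*` names are illustrative, not from this patch); both forms compile to the same field options and enforce the same constraints:

syntax = "proto3";

package example;

import "validate/validate.proto";

// Dotted option paths, as the files read before this patch.
message WidgetBefore {
  string name = 1 [(validate.rules).string.min_bytes = 1];
  uint32 status = 2 [(validate.rules).uint32 = {gte: 100, lt: 600}];
}

// Aggregate text-format values, as protoxform emits them. Text format does not
// require commas between fields, which is why they disappear in the new form.
message WidgetAfter {
  string name = 1 [(validate.rules).string = {min_bytes: 1}];
  uint32 status = 2 [(validate.rules).uint32 = {lt: 600 gte: 100}];
}

Field numbers and rule values are untouched, so neither the wire format nor the enforced validation behaviour should change.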
@@ -489,12 +488,173 @@ message CorsPolicy { // [#comment:next free field: 30] message RouteAction { + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. If + // specified, Envoy will lookup the runtime key to get the % of requests to + // mirror. Valid values are from 0 to 10000, allowing for increments of + // 0.01% of requests to be mirrored. If the runtime key is specified in the + // configuration but not present in runtime, 0 is the default and thus 0% of + // requests will be mirrored. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`runtime_fraction + // ` field instead. + string runtime_key = 2 [deprecated = true]; + + // If both :ref:`runtime_key + // ` and this field are not + // specified, all requests to the target cluster will be mirrored. + // + // If specified, this field takes precedence over the `runtime_key` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a :ref:`FractionalPercent ` proto represented + // as JSON/YAML and may also be represented as an integer with the assumption that the value + // is an integral percentage out of 100. For instance, a runtime key lookup returning the + // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is + // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, + // where the implicit denominator is 10000. + core.RuntimeFractionalPercent runtime_fraction = 3; + } + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + message HashPolicy { + message Header { + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. 
Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + // Hash on source IP address. + bool source_ip = 1; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + } + + // The flag that shortcircuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:upgrade_configs` + // ` + // but does not affect any custom filter chain specified there. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. + string upgrade_type = 1; + + // Determines if upgrades are available on this route. Defaults to true. + google.protobuf.BoolValue enabled = 2; + } + + reserved 12, 18, 19, 16, 22, 21; + oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -505,7 +665,7 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. 
Thus, if attempting to match on *Host*, match on *:authority* instead. - string cluster_header = 2 [(validate.rules).string.min_bytes = 1]; + string cluster_header = 2 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -515,18 +675,10 @@ message RouteAction { WeightedCluster weighted_clusters = 3; } - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered @@ -625,55 +777,6 @@ message RouteAction { // (e.g.: policies are not merged, most internal one becomes the enforced policy). RetryPolicy retry_policy = 9; - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; - - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` field instead. - string runtime_key = 2 [deprecated = true]; - - // If both :ref:`runtime_key - // ` and this field are not - // specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a :ref:`FractionalPercent ` proto represented - // as JSON/YAML and may also be represented as an integer with the assumption that the value - // is an integral percentage out of 100. 
For instance, a runtime key lookup returning the - // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is - // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, - // where the implicit denominator is 10000. - core.RuntimeFractionalPercent runtime_fraction = 3; - } - // Indicates that the route has a request mirroring policy. RequestMirrorPolicy request_mirror_policy = 10; @@ -682,10 +785,6 @@ message RouteAction { // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] core.RoutingPriority priority = 11; - reserved 12; - reserved 18; - reserved 19; - // Specifies a set of rate limit configurations that could be applied to the // route. repeated RateLimit rate_limits = 13; @@ -696,85 +795,6 @@ message RouteAction { // request. google.protobuf.BoolValue include_vh_rate_limits = 14; - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - message HashPolicy { - message Header { - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 [(validate.rules).string.min_bytes = 1]; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - // Hash on source IP address. - bool source_ip = 1; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - } - - // The flag that shortcircuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. 
- // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to // route the request. The method of combination is deterministic such that @@ -789,14 +809,9 @@ message RouteAction { // ignoring the rest of the hash policy list. repeated HashPolicy hash_policy = 15; - reserved 16; - reserved 22; - // Indicates that the route has a CORS policy. CorsPolicy cors = 17; - reserved 21; - // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of @@ -818,27 +833,8 @@ message RouteAction { // infinity). google.protobuf.Duration grpc_timeout_offset = 28; - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:upgrade_configs` - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1; - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - }; repeated UpgradeConfig upgrade_configs = 25; - // Configures :ref:`internal redirect ` behavior. - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } InternalRedirectAction internal_redirect_action = 26; // Indicates that the route has a hedge policy. Note that if this is set, @@ -850,6 +846,43 @@ message RouteAction { // HTTP retry :ref:`architecture overview `. // [#comment:next free field: 10] message RetryPolicy { + message RetryPriority { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + // Specifies the conditions under which retry takes place. 
These are the same // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. @@ -873,29 +906,11 @@ message RetryPolicy { // would have been exhausted. google.protobuf.Duration per_try_timeout = 3; - message RetryPriority { - string name = 1 [(validate.rules).string.min_bytes = 1]; - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - // Specifies an implementation of a RetryPriority which is used to determine the // distribution of load across priorities used for retries. Refer to // :ref:`retry plugin configuration ` for more details. RetryPriority retry_priority = 4; - message RetryHostPredicate { - string name = 1 [(validate.rules).string.min_bytes = 1]; - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host // for retries. If any of the predicates reject the host, host selection will be reattempted. // Refer to :ref:`retry plugin configuration ` for more @@ -910,23 +925,6 @@ message RetryPolicy { // HTTP status codes that should trigger a retry in addition to those specified by retry_on. repeated uint32 retriable_status_codes = 7; - message RetryBackOff { - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration.gt = {seconds: 0}]; - } - // Specifies parameters that control retry back off. This parameter is optional, in which case the // default base interval is 25 milliseconds or, if set, the current value of the // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times @@ -946,13 +944,13 @@ message HedgePolicy { // Must be at least 1. // Defaults to 1. // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; // Specifies a probability that an additional upstream request should be sent // on top of what is specified by initial_requests. // Defaults to 0. // [#not-implemented-hide:] - envoy.type.FractionalPercent additional_request_chance = 2; + type.FractionalPercent additional_request_chance = 2; // Indicates that a hedged request should be sent when the per-try timeout // is hit. This will only occur if the retry policy also indicates that a @@ -966,6 +964,23 @@ message HedgePolicy { } message RedirectAction { + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. 
+ TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + // When the scheme redirection take place, the following rules apply: // 1. If the source URI scheme is `http` and the port is explicitly // set to `:80`, the port will be removed after the redirection @@ -974,11 +989,14 @@ message RedirectAction { oneof scheme_rewrite_specifier { // The scheme portion of the URL will be swapped with "https". bool https_redirect = 4; + // The scheme portion of the URL will be swapped with this value. string scheme_redirect = 7; } + // The host portion of the URL will be swapped with this value. string host_redirect = 1; + // The port value of the URL will be swapped with this value. uint32 port_redirect = 8; @@ -997,26 +1015,9 @@ message RedirectAction { string prefix_rewrite = 5; } - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - // The HTTP status code to use in the redirect response. The default response // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum.defined_only = true]; + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; // Indicates that during redirection, the query portion of the URL will // be removed. Default value is false. @@ -1025,7 +1026,7 @@ message RedirectAction { message DirectResponseAction { // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}]; + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; // Specifies the content of the response body. If this setting is omitted, // no body is included in the generated response. @@ -1047,25 +1048,24 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string.min_bytes = 1]; + string operation = 1 [(validate.rules).string = {min_bytes: 1}]; } message Tracing { - // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% - envoy.type.FractionalPercent client_sampling = 1; + type.FractionalPercent client_sampling = 1; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.FractionalPercent random_sampling = 2; + type.FractionalPercent random_sampling = 2; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random @@ -1075,7 +1075,7 @@ message Tracing { // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. 
// Default: 100% - envoy.type.FractionalPercent overall_sampling = 3; + type.FractionalPercent overall_sampling = 3; } // A virtual cluster is a way of specifying a regex matching rule against @@ -1109,7 +1109,7 @@ message VirtualCluster { // .. attention:: // This field has been deprecated in favor of `headers` as it is not safe for use with // untrusted input in all cases. - string pattern = 1 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string pattern = 1 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // Specifies a list of header matchers to use for matching requests. Each specified header must // match. The pseudo-headers `:path` and `:method` can be used to match the request path and @@ -1119,7 +1119,7 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string.min_bytes = 1]; + string name = 2 [(validate.rules).string = {min_bytes: 1}]; // Optionally specifies the HTTP method to match on. For example GET, PUT, // etc. @@ -1131,18 +1131,6 @@ message VirtualCluster { // Global rate limiting :ref:`architecture overview `. message RateLimit { - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32.lte = 10]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - message Action { // The following descriptor entry is appended to the descriptor: // @@ -1183,10 +1171,10 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [(validate.rules).string.min_bytes = 1]; + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string.min_bytes = 1]; + string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1205,7 +1193,7 @@ message RateLimit { // ("generic_key", "") message GenericKey { // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor: @@ -1215,7 +1203,7 @@ message RateLimit { // ("header_match", "") message HeaderValueMatch { // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1228,7 +1216,7 @@ message RateLimit { // specified headers in the config. A match will happen if all the // headers in the config are present in the request with the same values // (or based on presence if the value field is not in the config). 
- repeated HeaderMatcher headers = 3 [(validate.rules).repeated .min_items = 1]; + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } oneof action_specifier { @@ -1254,13 +1242,25 @@ message RateLimit { } } + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + // A list of actions that are to be applied for this rate limit configuration. // Order matters as the actions are processed sequentially and the descriptor // is composed by appending descriptor entries in that sequence. If an action // cannot append a descriptor entry, no descriptor is generated for the // configuration. See :ref:`composing actions // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated .min_items = 1]; + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; } // .. attention:: @@ -1288,11 +1288,10 @@ message RateLimit { // // [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] message HeaderMatcher { - // Specifies the name of the header in the request. - string name = 1 [(validate.rules).string.min_bytes = 1]; + reserved 2, 3; - reserved 2; - reserved 3; + // Specifies the name of the header in the request. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1313,7 +1312,7 @@ message HeaderMatcher { // .. attention:: // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use // with untrusted input in all cases. - string regex_match = 5 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex_match = 5 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the @@ -1331,7 +1330,7 @@ message HeaderMatcher { // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" - envoy.type.Int64Range range_match = 6; + type.Int64Range range_match = 6; // If specified, header match will be performed based on whether the header is in the // request. @@ -1343,7 +1342,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string.min_bytes = 1]; + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1351,7 +1350,7 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string.min_bytes = 1]; + string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. 
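The HeaderMatcher and RateLimit hunks above also show the canonical message layout protoxform emits: nested enums and messages first, then a single consolidated `reserved` statement, then the fields, with type references written relative to the enclosing `envoy.` package (for example `envoy.type.Int64Range` becomes `type.Int64Range`). A hand-written sketch of that layout on a hypothetical message (`envoy.example.v2.Example` is illustrative only, not taken from the patch):

syntax = "proto3";

package envoy.example.v2;

import "envoy/api/v2/core/config_source.proto";

import "validate/validate.proto";

message Example {
  // Nested types are emitted before any fields.
  enum Mode {
    DEFAULT = 0;
    STRICT = 1;
  }

  // Retired field numbers are collected into one reserved statement near the top.
  reserved 2, 5;

  // Fields keep their original numbers, so reordering the declarations does not
  // change the wire format.
  Mode mode = 1 [(validate.rules).enum = {defined_only: true}];

  // Written relative to the shared envoy.* ancestor instead of fully qualified
  // as envoy.api.v2.core.ConfigSource; protobuf name resolution walks up the
  // enclosing packages to find it.
  api.v2.core.ConfigSource config_source = 3 [(validate.rules).message = {required: true}];
}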
@@ -1368,7 +1367,7 @@ message HeaderMatcher { message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; // Specifies the value of the key. If the value is absent, a request // that contains the key in its query string will match, whether the @@ -1389,7 +1388,7 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.StringMatcher string_match = 5 [(validate.rules).message.required = true]; + type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. bool present_match = 6; diff --git a/api/envoy/api/v2/srds.proto b/api/envoy/api/v2/srds.proto index a51426af01b7..0d74594cfe48 100644 --- a/api/envoy/api/v2/srds.proto +++ b/api/envoy/api/v2/srds.proto @@ -2,15 +2,17 @@ syntax = "proto3"; package envoy.api.v2; -import "envoy/api/v2/discovery.proto"; -import "google/api/annotations.proto"; -import "validate/validate.proto"; - option java_outer_classname = "SrdsProto"; -option java_package = "io.envoyproxy.envoy.api.v2"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v2"; option java_generic_services = true; +import "envoy/api/v2/discovery.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` // @@ -99,9 +101,6 @@ service ScopedRoutesDiscoveryService { // [#comment:next free field: 4] // [#proto-status: experimental] message ScopedRouteConfiguration { - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string.min_bytes = 1]; - // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` // specified in the HttpConnectionManager. The matching is done per HTTP @@ -120,14 +119,17 @@ message ScopedRouteConfiguration { // The ordered set of fragments to match against. The order must match the // fragments in the corresponding // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated .min_items = 1]; + repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated // with this scope. - string route_configuration_name = 2 [(validate.rules).string.min_bytes = 1]; + string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; // The key to match against. - Key key = 3 [(validate.rules).message.required = true]; + Key key = 3 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto index c02835dbbc56..1f7a0e61d42c 100644 --- a/api/envoy/config/accesslog/v2/als.proto +++ b/api/envoy/config/accesslog/v2/als.proto @@ -20,7 +20,7 @@ import "validate/validate.proto"; // populate :ref:`StreamAccessLogsMessage.http_logs // `. 
message HttpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers // `. @@ -38,7 +38,7 @@ message HttpGrpcAccessLogConfig { // Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will // populate *StreamAccessLogsMessage.tcp_logs*. message TcpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; } // Common configuration for gRPC access logs. @@ -46,15 +46,15 @@ message CommonGrpcAccessLogConfig { // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier // `. This allows the // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string.min_bytes = 1]; + string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The gRPC service for the access log service. - envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it diff --git a/api/envoy/config/accesslog/v2/file.proto b/api/envoy/config/accesslog/v2/file.proto index b88529a3251d..9ed71469882b 100644 --- a/api/envoy/config/accesslog/v2/file.proto +++ b/api/envoy/config/accesslog/v2/file.proto @@ -6,9 +6,10 @@ option java_outer_classname = "FileProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -import "validate/validate.proto"; import "google/protobuf/struct.proto"; +import "validate/validate.proto"; + // [#protodoc-title: File access log] // Custom configuration for an :ref:`AccessLog ` @@ -16,7 +17,7 @@ import "google/protobuf/struct.proto"; // AccessLog. message FileAccessLog { // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string.min_bytes = 1]; + string path = 1 [(validate.rules).string = {min_bytes: 1}]; // Access log format. Envoy supports :ref:`custom access log formats // ` as well as a :ref:`default format diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index 5a3447167b81..5fa5805246ad 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -1,8 +1,3 @@ -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v2 configuration. See the :ref:`v2 configuration overview -// ` for more detail. 
- syntax = "proto3"; package envoy.config.bootstrap.v2; @@ -11,31 +6,32 @@ option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/cds.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/auth/cert.proto"; import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/cds.proto"; import "envoy/api/v2/lds.proto"; -import "envoy/config/trace/v2/trace.proto"; import "envoy/config/metrics/v2/stats.proto"; import "envoy/config/overload/v2alpha/overload.proto"; +import "envoy/config/trace/v2/trace.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "validate/validate.proto"; +// [#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + // Bootstrap :ref:`configuration overview `. message Bootstrap { - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - envoy.api.v2.core.Node node = 1; - message StaticResources { // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. - repeated envoy.api.v2.Listener listeners = 1; + repeated api.v2.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary @@ -43,24 +39,24 @@ message Bootstrap { // how to speak to the management server. These cluster definitions may not // use :ref:`EDS ` (i.e. they should be static // IP or DNS-based). - repeated envoy.api.v2.Cluster clusters = 2; + repeated api.v2.Cluster clusters = 2; // These static secrets can be used by :ref:`SdsSecretConfig // ` - repeated envoy.api.v2.auth.Secret secrets = 3; + repeated api.v2.auth.Secret secrets = 3; } - // Statically specified resources. - StaticResources static_resources = 2; message DynamicResources { + reserved 4; + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. - envoy.api.v2.core.ConfigSource lds_config = 1; + api.v2.core.ConfigSource lds_config = 1; // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. - envoy.api.v2.core.ConfigSource cds_config = 2; + api.v2.core.ConfigSource cds_config = 2; // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type @@ -69,10 +65,18 @@ message Bootstrap { // :ref:`ConfigSources ` that have // the :ref:`ads ` field set will be // streamed on the ADS channel. - envoy.api.v2.core.ApiConfigSource ads_config = 3; - - reserved 4; + api.v2.core.ApiConfigSource ads_config = 3; } + + reserved 10; + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + api.v2.core.Node node = 1; + + // Statically specified resources. + StaticResources static_resources = 2; + // xDS configuration sources. DynamicResources dynamic_resources = 3; @@ -82,16 +86,16 @@ message Bootstrap { // Health discovery service config option. // (:ref:`core.ApiConfigSource `) - envoy.api.v2.core.ApiConfigSource hds_config = 14; + api.v2.core.ApiConfigSource hds_config = 14; // Optional file system path to search for startup flag files. string flags_path = 5; // Optional set of stats sinks. 
- repeated envoy.config.metrics.v2.StatsSink stats_sinks = 6; + repeated metrics.v2.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. - envoy.config.metrics.v2.StatsConfig stats_config = 13; + metrics.v2.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and @@ -99,8 +103,8 @@ message Bootstrap { // seconds). // Duration must be at least 1ms and at most 5 min. google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt: {seconds: 300}, - gte: {nanos: 1000000} + lt {seconds: 300} + gte {nanos: 1000000} }]; // Optional watchdog configuration. @@ -108,9 +112,7 @@ message Bootstrap { // Configuration for an external tracing provider. If not specified, no // tracing will be performed. - envoy.config.trace.v2.Tracing tracing = 9; - - reserved 10; + trace.v2.Tracing tracing = 9; // Configuration for the runtime configuration provider (deprecated). If not // specified, a “null” provider will be used which will result in all defaults @@ -126,7 +128,7 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - envoy.config.overload.v2alpha.OverloadManager overload_manager = 15; + overload.v2alpha.OverloadManager overload_manager = 15; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This @@ -162,15 +164,20 @@ message Admin { // The TCP address that the administration server will listen on. // If not specified, Envoy will not start an administration server. - envoy.api.v2.core.Address address = 3; + api.v2.core.Address address = 3; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. - repeated envoy.api.v2.core.SocketOption socket_options = 4; + repeated api.v2.core.SocketOption socket_options = 4; } // Cluster manager :ref:`architecture overview `. message ClusterManager { + message OutlierDetection { + // Specifies the path to the outlier event log. + string event_log_path = 1; + } + // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. @@ -182,22 +189,18 @@ message ClusterManager { // routing `_. string local_cluster_name = 1; - message OutlierDetection { - // Specifies the path to the outlier event log. - string event_log_path = 1; - } // Optional global configuration for outlier detection. OutlierDetection outlier_detection = 2; // Optional configuration used to bind newly established upstream connections. // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - envoy.api.v2.core.BindConfig upstream_bind_config = 3; + api.v2.core.BindConfig upstream_bind_config = 3; // A management server endpoint to stream load stats to via // *StreamLoadStats*. This must have :ref:`api_type // ` :ref:`GRPC // `. - envoy.api.v2.core.ApiConfigSource load_stats_config = 4; + api.v2.core.ApiConfigSource load_stats_config = 4; } // Envoy process watchdog configuration. When configured, this monitors for @@ -285,12 +288,12 @@ message RuntimeLayer { string name = 1; // RTDS configuration source. - envoy.api.v2.core.ConfigSource rtds_config = 2; + api.v2.core.ConfigSource rtds_config = 2; } // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof layer_specifier { option (validate.required) = true; diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto index c6d47807ce50..9e4626c23e89 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.cluster.dynamic_forward_proxy.v2alpha; -option java_outer_classname = "DynamicForwardProxyClusterProto"; +option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; @@ -19,5 +19,5 @@ message ClusterConfig { // match that of associated :ref:`dynamic forward proxy HTTP filter configuration // `. common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index fabaa0274fb7..c418de9f54e4 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -43,8 +43,8 @@ import "validate/validate.proto"; message RedisClusterConfig { // Interval between successive topology refresh requests. If not set, this defaults to 5s. - google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; // Timeout for topology refresh request. If not set, this defaults to 3s. - google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; } diff --git a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto index f7c796fe90f6..7d9b7d329eca 100644 --- a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto +++ b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto @@ -23,7 +23,7 @@ message DnsCacheConfig { // configurations with the same name *must* otherwise have the same settings when referenced // from different configuration components. Configuration will fail to load if this is not // the case. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The DNS lookup family to use during resolution. // @@ -33,7 +33,8 @@ message DnsCacheConfig { // likely build a "happy eyeballs" connection pool which would race the primary / fall back // address and return the one that wins. This same method could potentially also be used for // QUIC to TCP fall back.] - api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2 [(validate.rules).enum.defined_only = true]; + api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2 + [(validate.rules).enum = {defined_only: true}]; // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. // @@ -41,7 +42,7 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. 
- google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; // The TTL for hosts that are unused. Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. @@ -55,7 +56,7 @@ message DnsCacheConfig { // .. note: // // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. // @@ -64,5 +65,5 @@ message DnsCacheConfig { // The implementation is approximate and enforced independently on each worker thread, thus // it is possible for the maximum hosts in the cache to go slightly above the configured // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; } diff --git a/api/envoy/config/common/tap/v2alpha/common.proto b/api/envoy/config/common/tap/v2alpha/common.proto index ac640b83e4fb..391bed13c69c 100644 --- a/api/envoy/config/common/tap/v2alpha/common.proto +++ b/api/envoy/config/common/tap/v2alpha/common.proto @@ -1,28 +1,27 @@ syntax = "proto3"; -import "envoy/service/tap/v2alpha/common.proto"; -import "envoy/api/v2/core/config_source.proto"; - -import "validate/validate.proto"; - package envoy.config.common.tap.v2alpha; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; +import "envoy/api/v2/core/config_source.proto"; +import "envoy/service/tap/v2alpha/common.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Common tap extension configuration] // Common configuration for all tap extensions. message CommonExtensionConfig { - // [#not-implemented-hide:] message TapDSConfig { // Configuration for the source of TapDS updates for this Cluster. - envoy.api.v2.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string.min_bytes = 1]; + string name = 2 [(validate.rules).string = {min_bytes: 1}]; } oneof config_type { @@ -45,5 +44,5 @@ message CommonExtensionConfig { message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string.min_bytes = 1]; + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto index d777708175b5..7464094fbf2c 100644 --- a/api/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto @@ -98,22 +98,22 @@ message ComparisonFilter { } // Comparison operator. - Op op = 1 [(validate.rules).enum.defined_only = true]; + Op op = 1 [(validate.rules).enum = {defined_only: true}]; // Value to compare against. 
- envoy.api.v2.core.RuntimeUInt32 value = 2; + api.v2.core.RuntimeUInt32 value = 2; } // Filters on HTTP response/status code. message StatusCodeFilter { // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters on total request duration in milliseconds. message DurationFilter { // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters for requests that are not health check requests. A health check @@ -130,10 +130,10 @@ message TraceableFilter { message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. // If found in runtime, this value will replace the default numerator. - string runtime_key = 1 [(validate.rules).string.min_bytes = 1]; + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - envoy.type.FractionalPercent percent_sampled = 2; + type.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header // :ref:`x-request-id` being present. If @@ -154,21 +154,21 @@ message RuntimeFilter { // Filters are evaluated sequentially and if one of them returns false, the // filter returns false immediately. message AndFilter { - repeated AccessLogFilter filters = 1 [(validate.rules).repeated .min_items = 2]; + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; } // Performs a logical “or” operation on the result of each individual filter. // Filters are evaluated sequentially and if one of them returns true, the // filter returns true immediately. message OrFilter { - repeated AccessLogFilter filters = 2 [(validate.rules).repeated .min_items = 2]; + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; } // Filters requests based on the presence or value of a request header. message HeaderFilter { // Only requests with a header which matches the specified HeaderMatcher will pass the filter // check. - envoy.api.v2.route.HeaderMatcher header = 1 [(validate.rules).message.required = true]; + api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. @@ -178,27 +178,29 @@ message ResponseFlagFilter { // Only responses with the any of the flags listed in this field will be logged. // This field is optional. If it is not specified, then any response flag will pass // the filter check. 
- repeated string flags = 1 [(validate.rules).repeated .items.string = { - in: [ - "LH", - "UH", - "UT", - "LR", - "UR", - "UF", - "UC", - "UO", - "NR", - "DI", - "FI", - "RL", - "UAEX", - "RLSE", - "DC", - "URX", - "SI", - "IH" - ] + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + } + } }]; } @@ -207,26 +209,42 @@ message ResponseFlagFilter { message GrpcStatusFilter { enum Status { OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; } // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated .items.enum.defined_only = true]; + repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; // If included and set to true, the filter will instead block all responses with a gRPC status or // inferred gRPC status enumerated in statuses, and allow all other responses. @@ -242,6 +260,7 @@ message ExtensionFilter { // Custom configuration that depends on the filter being instantiated. oneof config_type { google.protobuf.Struct config = 2; + google.protobuf.Any typed_config = 3; } } diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto index 15164172dcf4..41b4a9f09600 100644 --- a/api/envoy/config/filter/fault/v2/fault.proto +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -17,22 +17,22 @@ import "validate/validate.proto"; // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. message FaultDelay { + enum FaultDelayType { + // Unused and deprecated. + FIXED = 0; + } + // Fault delays are controlled via an HTTP header (if applicable). See the // :ref:`http fault filter ` documentation for // more information. message HeaderDelay { } - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } + reserved 2; // Unused and deprecated. Will be removed in the next release. FaultDelayType type = 1 [deprecated = true]; - reserved 2; - oneof fault_delay_secifier { option (validate.required) = true; @@ -42,7 +42,7 @@ message FaultDelay { // delay will be injected before a new request/operation. For TCP // connections, the proxying of the connection upstream will be delayed // for the specified period. This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; @@ -57,7 +57,7 @@ message FaultRateLimit { // Describes a fixed/constant rate limit. message FixedLimit { // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64.gte = 1]; + uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; } // Rate limits are controlled via an HTTP header (if applicable). 
See the diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto index 7f2f8c98239c..44062f248199 100644 --- a/api/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -18,7 +18,7 @@ message Buffer { // The maximum request size that the filter will buffer before the connection // manager will stop buffering and return a 413 response. - google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; } message BufferPerRoute { @@ -26,9 +26,9 @@ message BufferPerRoute { option (validate.required) = true; // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool.const = true]; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Override the global configuration of the filter with this new config. - Buffer buffer = 2 [(validate.rules).message.required = true]; + Buffer buffer = 2 [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto index b5c78db544a7..df86120c28fd 100644 --- a/api/envoy/config/filter/http/csrf/v2/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.filter.http.csrf.v2; -option java_outer_classname = "CsrfPolicyProto"; +option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; @@ -25,8 +25,8 @@ message CsrfPolicy { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v2.core.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message.required = true]; + api.v2.core.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; // Specifies that CSRF policies will be evaluated and tracked, but not enforced. // This is intended to be used when filter_enabled is off. @@ -38,12 +38,12 @@ message CsrfPolicy { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; + api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; // Specifies additional source origins that will be allowed in addition to // the destination origin. // // More information on how this can be configured via runtime can be found // :ref:`here `. - repeated envoy.type.matcher.StringMatcher additional_origins = 3; + repeated type.matcher.StringMatcher additional_origins = 3; } diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index c315ddb46515..daa0822341a0 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -19,5 +19,5 @@ message FilterConfig { // match that of associated :ref:`dynamic forward proxy cluster configuration // `. 
common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index 84d4ab19495b..d18cc8440b98 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -9,7 +9,6 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; import "envoy/api/v2/core/http_uri.proto"; - import "envoy/type/http_status.proto"; import "envoy/type/matcher/string.proto"; @@ -22,7 +21,7 @@ message ExtAuthz { // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). - envoy.api.v2.core.GrpcService grpc_service = 1; + api.v2.core.GrpcService grpc_service = 1; // HTTP service configuration (default timeout: 200ms). HttpService http_service = 3; @@ -67,7 +66,7 @@ message ExtAuthz { // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. - envoy.type.HttpStatus status_on_error = 7; + type.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. @@ -90,7 +89,7 @@ message BufferSettings { // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. // The authorization request will be dispatched and no 413 HTTP error will be returned by the @@ -123,17 +122,14 @@ message BufferSettings { // ` // for details. message HttpService { + reserved 3, 4, 5, 6; + // Sets the HTTP server URI which the authorization requests must be sent to. - envoy.api.v2.core.HttpUri server_uri = 1; + api.v2.core.HttpUri server_uri = 1; // Sets a prefix to the value of authorization request header *Path*. string path_prefix = 2; - reserved 3; - reserved 4; - reserved 5; - reserved 6; - // Settings used for controlling authorization request metadata. AuthorizationRequest authorization_request = 7; @@ -151,25 +147,25 @@ message AuthorizationRequest { // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have // a message body. // - envoy.type.matcher.ListStringMatcher allowed_headers = 1; + type.matcher.ListStringMatcher allowed_headers = 1; // Sets a list of headers that will be included to the request to authorization service. Note that // client request of the same key will be overridden. - repeated envoy.api.v2.core.HeaderValue headers_to_add = 2; + repeated api.v2.core.HeaderValue headers_to_add = 2; } message AuthorizationResponse { // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the original client request. // Note that coexistent headers will be overridden. 
- envoy.type.matcher.ListStringMatcher allowed_upstream_headers = 1; + type.matcher.ListStringMatcher allowed_upstream_headers = 1; // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority // (Host)* will be in the response to the client. When a header is included in this list, *Path*, // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - envoy.type.matcher.ListStringMatcher allowed_client_headers = 2; + type.matcher.ListStringMatcher allowed_client_headers = 2; } // Extra settings on a per virtualhost/route/weighted-cluster level. @@ -179,10 +175,10 @@ message ExtAuthzPerRoute { // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool.const = true]; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message.required = true]; + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index 8256690837fc..98c13ff97b40 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -24,7 +24,7 @@ message FaultAbort { option (validate.required) = true; // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}]; + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; } // The percentage of requests/operations/connections that will be aborted with the error code @@ -55,7 +55,7 @@ message HTTPFault { // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on // presence if the *value* field is not in the config). - repeated envoy.api.v2.route.HeaderMatcher headers = 4; + repeated api.v2.route.HeaderMatcher headers = 4; // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto index 02041b87fd27..ac903b693121 100644 --- a/api/envoy/config/filter/http/gzip/v2/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto @@ -14,34 +14,39 @@ import "validate/validate.proto"; // Gzip :ref:`configuration overview `. message Gzip { - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {gte: 1, lte: 9}]; + enum CompressionStrategy { + DEFAULT = 0; - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32.gte = 30]; + FILTERED = 1; + + HUFFMAN = 2; + + RLE = 3; + } message CompressionLevel { enum Enum { DEFAULT = 0; + BEST = 1; + SPEED = 2; } } + // Value from 1 to 9 that controls the amount of internal memory used by zlib. 
Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // Minimum response length, in bytes, which will trigger compression. The default value is 30. + google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32 = {gte: 30}]; + // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. "BEST" provides higher compression at the cost of // higher latency, "SPEED" provides lower compression with minimum impact on response time. // "DEFAULT" provides an optimal result between speed and compression. This field will be set to // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum.defined_only = true]; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } + CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; // A value used for selecting the zlib compression strategy which is directly related to the // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though @@ -49,7 +54,7 @@ message Gzip { // run-length encoding (RLE) is typically used when the content is known for having sequences // which same data occurs many consecutive times. For more information about each strategy, please // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum.defined_only = true]; + CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; // Set of strings that allows specifying which mime-types yield compression; e.g., // application/json, text/html, etc. When this field is not defined, compression will be applied @@ -69,5 +74,5 @@ message Gzip { // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. For more details about this parameter, please refer to // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {gte: 9, lte: 15}]; + google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; } diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto index 345c5225edf1..6b66a0c89797 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; message Config { enum ValueType { STRING = 0; + NUMBER = 1; // The value is a serialized `protobuf.Value @@ -41,7 +42,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string.min_bytes = 1]; + string key = 2 [(validate.rules).string = {min_bytes: 1}]; // The value to pair with the given key. // @@ -63,7 +64,7 @@ message Config { // A Rule defines what metadata to apply when a header is present or missing. message Rule { // The header that triggers this rule — required. - string header = 1 [(validate.rules).string.min_bytes = 1]; + string header = 1 [(validate.rules).string = {min_bytes: 1}]; // If the header is present, apply this metadata KeyValuePair. 
// diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index 9cd572b43709..15a5b7fd3b7e 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -6,23 +6,22 @@ option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - import "envoy/api/v2/route/route.proto"; import "envoy/type/percent.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. message HealthCheck { - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message.required = true]; - reserved 2; - reserved "endpoint"; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; // If operating in pass through mode, the amount of time in milliseconds // that the filter should cache the upstream response. @@ -31,10 +30,10 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. - map cluster_min_healthy_percentages = 4; + map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will // check a request’s headers against all the specified headers. To specify the health check // endpoint, set the ``:path`` header to match on. - repeated envoy.api.v2.route.HeaderMatcher headers = 5; + repeated api.v2.route.HeaderMatcher headers = 5; } diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index 92ec469c62ad..ac088d80eaac 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -14,7 +14,6 @@ import "validate/validate.proto"; // IP tagging :ref:`configuration overview `. message IPTagging { - // The type of requests the filter should apply to. The supported types // are internal, external or both. The // :ref:`x-forwarded-for` header is @@ -32,9 +31,6 @@ message IPTagging { EXTERNAL = 2; } - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum.defined_only = true]; - // Supplies the IP tag name and the IP address subnets. message IPTag { // Specifies the IP tag name to apply. @@ -42,11 +38,14 @@ message IPTagging { // A list of IP address subnets that will be tagged with // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated envoy.api.v2.core.CidrRange ip_list = 2; + repeated api.v2.core.CidrRange ip_list = 2; } + // The type of request the filter should apply to. + RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] // The set of IP tags for the filter. 
- repeated IPTag ip_tags = 4 [(validate.rules).repeated .min_items = 1]; + repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index c07b780b9649..e2584b137505 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -1,4 +1,3 @@ - syntax = "proto3"; package envoy.config.filter.http.jwt_authn.v2alpha; @@ -10,8 +9,10 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha" import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/http_uri.proto"; import "envoy/api/v2/route/route.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; + import "validate/validate.proto"; // [#protodoc-title: JWT Authentication] @@ -53,7 +54,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string.min_bytes = 1]; + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -107,7 +108,7 @@ message JwtProvider { // local_jwks: // inline_string: ACADADADADA // - envoy.api.v2.core.DataSource local_jwks = 4; + api.v2.core.DataSource local_jwks = 4; } // If false, the JWT is removed in the request after a success verification. If true, the JWT is @@ -193,7 +194,7 @@ message RemoteJwks { // uri: https://www.googleapis.com/oauth2/v1/certs // cluster: jwt.www.googleapis.com|443 // - envoy.api.v2.core.HttpUri http_uri = 1; + api.v2.core.HttpUri http_uri = 1; // Duration after which the cached JWKS should be expired. If not specified, default cache // duration is 5 minutes. @@ -203,7 +204,7 @@ message RemoteJwks { // This message specifies a header location to extract JWT token. message JwtHeader { // The HTTP header name. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -288,14 +289,14 @@ message JwtRequirement { // Their results are OR-ed; if any one of them passes, the result is passed message JwtRequirementOrList { // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a list of RequiredProvider. // Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. message JwtRequirementAndList { // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a Jwt requirement for a specific Route condition. @@ -330,7 +331,7 @@ message RequirementRule { // match: // prefix: / // - envoy.api.v2.route.RouteMatch match = 1 [(validate.rules).message.required = true]; + api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. 
JwtRequirement requires = 2; @@ -355,7 +356,7 @@ message RequirementRule { // jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. message FilterStateRule { // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto index 6fc7fabc6be3..dae34551a0ac 100644 --- a/api/envoy/config/filter/http/lua/v2/lua.proto +++ b/api/envoy/config/filter/http/lua/v2/lua.proto @@ -16,5 +16,5 @@ message Lua { // further loads code from disk if desired. Note that if JSON configuration is used, the code must // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. - string inline_code = 1 [(validate.rules).string.min_bytes = 1]; + string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto index 5c09b860fc5c..e2de31ea0b8c 100644 --- a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto @@ -15,7 +15,6 @@ import "validate/validate.proto"; // for the request. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original // source address. The option will not be applied if the mark is 0. diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index 08189be1df89..1cfd362d86e5 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; message RateLimit { // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string.min_bytes = 1]; + string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configurations to be applied with the same // stage number. If not set, the default stage number is 0. @@ -25,7 +25,7 @@ message RateLimit { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The type of requests the filter should apply to. The supported // types are *internal*, *external* or *both*. A request is considered internal if @@ -53,6 +53,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. 
- envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message.required = true]; + ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/rbac/v2/rbac.proto b/api/envoy/config/filter/http/rbac/v2/rbac.proto index 7c9a3c24d017..4bdd8c5f2c9c 100644 --- a/api/envoy/config/filter/http/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v2/rbac.proto @@ -28,8 +28,6 @@ message RBAC { message RBACPerRoute { reserved 1; - reserved "disabled"; - // Override the global configuration of the filter with this new config. // If absent, the global RBAC policy will be disabled for this route. RBAC rbac = 2; diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto index fd0cadec9631..7543069af029 100644 --- a/api/envoy/config/filter/http/router/v2/router.proto +++ b/api/envoy/config/filter/http/router/v2/router.proto @@ -30,7 +30,7 @@ message Router { // are configured in the same way as access logs, but each log entry represents // an upstream request. Presuming retries are configured, multiple upstream // requests may be made for each downstream (inbound) request. - repeated envoy.config.filter.accesslog.v2.AccessLog upstream_log = 3; + repeated accesslog.v2.AccessLog upstream_log = 3; // Do not add any additional *x-envoy-* headers to requests or responses. This // only affects the :ref:`router filter generated *x-envoy-* headers @@ -54,13 +54,15 @@ message Router { // * :ref:`config_http_filters_router_x-envoy-max-retries` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated .items.string = { - in: [ - "x-envoy-upstream-rq-timeout-ms", - "x-envoy-upstream-rq-per-try-timeout-ms", - "x-envoy-max-retries", - "x-envoy-retry-grpc-on", - "x-envoy-retry-on" - ] + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } }]; } diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto index 54a67ceddf1c..e75ee5d67540 100644 --- a/api/envoy/config/filter/http/squash/v2/squash.proto +++ b/api/envoy/config/filter/http/squash/v2/squash.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; // [#proto-status: experimental] message Squash { // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // When the filter requests the Squash server to create a DebugAttachment, it will use this // structure as template for the body of the request. 
It can contain reference to environment diff --git a/api/envoy/config/filter/http/tap/v2alpha/tap.proto b/api/envoy/config/filter/http/tap/v2alpha/tap.proto index 10d7bdd1e0d8..ee9027055ab9 100644 --- a/api/envoy/config/filter/http/tap/v2alpha/tap.proto +++ b/api/envoy/config/filter/http/tap/v2alpha/tap.proto @@ -1,15 +1,15 @@ syntax = "proto3"; -import "envoy/config/common/tap/v2alpha/common.proto"; - -import "validate/validate.proto"; - package envoy.config.filter.http.tap.v2alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; +import "envoy/config/common/tap/v2alpha/common.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. @@ -17,5 +17,5 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; message Tap { // Common configuration for the HTTP tap filter. common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto index 248f78bb82e0..42947918f2db 100644 --- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -12,27 +12,6 @@ import "validate/validate.proto"; // gRPC-JSON transcoder :ref:`configuration overview `. message GrpcJsonTranscoder { - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - repeated string services = 2 [(validate.rules).repeated .min_items = 1]; - message PrintOptions { // Whether to add spaces, line breaks and indentation to make the JSON // output easy to read. Defaults to false. @@ -53,7 +32,28 @@ message GrpcJsonTranscoder { // generate JSON field names using the ``json_name`` option, or lower camel case, // in that order. Setting this flag will preserve the original field names. Defaults to false. bool preserve_proto_field_names = 4; - }; + } + + oneof descriptor_set { + option (validate.required) = true; + + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + bytes proto_descriptor_bin = 4; + } + + // A list of strings that + // supplies the fully qualified service names (i.e. "package_name.service_name") that + // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, + // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than + // the service names specified here, but they won't be translated. 
+ repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `. - string auth_api_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). The actual fetch time @@ -35,5 +36,5 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no // IP white list. - repeated envoy.api.v2.core.CidrRange ip_white_list = 4; + repeated api.v2.core.CidrRange ip_white_list = 4; } diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index d4c6937e56ec..b46bf9384469 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -15,16 +15,28 @@ import "validate/validate.proto"; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. + Hessian2 = 0; +} + // [#comment:next free field: 6] message DubboProxy { // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum.defined_only = true]; + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum.defined_only = true]; + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; // The route table for the connection manager is static and is specified in this property. repeated RouteConfiguration route_config = 4; @@ -36,24 +48,12 @@ message DubboProxy { repeated DubboFilter dubbo_filters = 5; } -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - // the default protocol. - Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - // DubboFilter configures a Dubbo filter. // [#comment:next free field: 3] message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
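An illustrative fragment (not taken from this patch), using hypothetical names (package example.v2, message ExampleConfig, fields stat_prefix and domain), showing the rewrite that accounts for most of the churn in the hunks above: protoc-gen-validate field options move from the dotted sub-field spelling to the aggregate text-format spelling. Both fields below carry the same value on the (validate.rules) extension, so protoc should resolve either spelling to the same descriptor:

    syntax = "proto3";

    package example.v2;

    import "validate/validate.proto";

    message ExampleConfig {
      // Dotted sub-field spelling, as it appears on the '-' lines of this patch.
      string stat_prefix = 1 [(validate.rules).string.min_bytes = 1];

      // Aggregate text-format spelling, as emitted by protoxform on the '+' lines.
      string domain = 2 [(validate.rules).string = {min_bytes: 1}];
    }

The same equivalence holds for the duration, uint32, enum, message and repeated rules rewritten throughout these files.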
diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index 02d86443a6f3..77565fb3a771 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -39,10 +39,10 @@ message RouteConfiguration { // [#comment:next free field: 3] message Route { // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message.required = true]; + RouteAction route = 2 [(validate.rules).message = {required: true}]; } // [#comment:next free field: 3] @@ -54,7 +54,7 @@ message RouteMatch { // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). - repeated envoy.api.v2.route.HeaderMatcher headers = 2; + repeated api.v2.route.HeaderMatcher headers = 2; } // [#comment:next free field: 3] @@ -69,15 +69,12 @@ message RouteAction { // request is routed to one of the upstream clusters based on weights // assigned to each cluster. // Currently ClusterWeight only supports the name and weight fields. - envoy.api.v2.route.WeightedCluster weighted_clusters = 2; + api.v2.route.WeightedCluster weighted_clusters = 2; } } // [#comment:next free field: 5] message MethodMatch { - // The name of the method. - envoy.type.matcher.StringMatcher name = 1; - // The parameter matching type. message ParameterMatchSpecifier { oneof parameter_match_specifier { @@ -95,10 +92,13 @@ message MethodMatch { // // * For range [-10,0), route will match for header value -1, but not for 0, // "somestring", 10.9, "-1somestring" - envoy.type.Int64Range range_match = 4; + type.Int64Range range_match = 4; } } + // The name of the method. + type.matcher.StringMatcher name = 1; + // Method parameter definition. // The key is the parameter index, starting from 0. // The value is the parameter matching type. diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index 8d0a6c6ca246..bc9ed5d51ca4 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -20,11 +20,11 @@ import "validate/validate.proto"; // A failed check will cause this filter to close the TCP connection. message ExtAuthz { // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. - envoy.api.v2.core.GrpcService grpc_service = 2; + api.v2.core.GrpcService grpc_service = 2; // The filter's behaviour in case the external authorization service does // not respond back. 
When it is set to true, Envoy will also allow traffic in case of diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index fae9786c3893..efdfb4be9392 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -26,7 +26,6 @@ import "validate/validate.proto"; // [#comment:next free field: 35] message HttpConnectionManager { enum CodecType { - // For every new connection, the connection manager will determine which // codec to use. This mode supports both ALPN for TLS listeners as well as // protocol inference for plaintext listeners. If ALPN data is available, it @@ -43,42 +42,44 @@ message HttpConnectionManager { HTTP2 = 2; } - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum.defined_only = true]; + enum ServerHeaderTransformation { + // Overwrite any Server header with the contents of server_name. + OVERWRITE = 0; - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; - oneof route_specifier { - option (validate.required) = true; + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + enum ForwardClientCertDetails { + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; - // The route table for the connection manager is static and is specified in this property. - envoy.api.v2.RouteConfiguration route_config = 4; + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. 
- google.protobuf.BoolValue add_user_agent = 6; + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + ALWAYS_FORWARD_ONLY = 4; + } message Tracing { enum OperationName { - // The HTTP listener is used for ingress/incoming requests. INGRESS = 0; @@ -92,7 +93,8 @@ message HttpConnectionManager { // // .. attention:: // This field has been deprecated in favor of `traffic_direction`. - OperationName operation_name = 1 [(validate.rules).enum.defined_only = true, deprecated = true]; + OperationName operation_name = 1 + [(validate.rules).enum = {defined_only: true}, deprecated = true]; // A list of header names used to create tags for the active span. The header name is used to // populate the tag name, and the header value is used to populate the tag value. The tag is @@ -105,14 +107,14 @@ message HttpConnectionManager { // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% - envoy.type.Percent client_sampling = 3; + type.Percent client_sampling = 3; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.Percent random_sampling = 4; + type.Percent random_sampling = 4; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random @@ -122,7 +124,7 @@ message HttpConnectionManager { // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.Percent overall_sampling = 5; + type.Percent overall_sampling = 5; // Whether to annotate spans with additional data. If true, spans will include logs for stream // events. @@ -134,37 +136,124 @@ message HttpConnectionManager { google.protobuf.UInt32Value max_path_tag_length = 7; } + message InternalAddressConfig { + // Whether unix socket addresses should be considered internal. + bool unix_sockets = 1; + } + + // [#comment:next free field: 7] + message SetCurrentClientCertDetails { + reserved 2; + + // Whether to forward the subject of the client cert. Defaults to false. + google.protobuf.BoolValue subject = 1; + + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + bool chain = 6; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + } + + // The configuration for HTTP upgrades. + // For each upgrade type desired, an UpgradeConfig must be added. + // + // .. warning:: + // + // The current implementation of upgrade headers does not handle + // multi-valued upgrade headers. Support for multi-valued headers may be + // added in the future if needed. + // + // .. 
warning:: + // The current implementation of upgrade headers does not work with HTTP/2 + // upstreams. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. + string upgrade_type = 1; + + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + repeated HttpFilter filters = 2; + + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + google.protobuf.BoolValue enabled = 3; + } + + reserved 27; + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + api.v2.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } + + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. Tracing tracing = 7; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - envoy.api.v2.core.Http1ProtocolOptions http_protocol_options = 8; + api.v2.core.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - envoy.api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; + api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. string server_name = 10; - enum ServerHeaderTransformation { - - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - // Pass through the value of the server header, and do not append a header - // if none is present. 
- PASS_THROUGH = 2; - } // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. @@ -172,7 +261,7 @@ message HttpConnectionManager { // The max configurable limit is 96 KiB, based on current implementation // constraints. google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 96]; + [(validate.rules).uint32 = {lte: 96 gt: 0}]; // The idle timeout for connections managed by the connection manager. The // idle timeout is defined as the period in which there are no active @@ -261,7 +350,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. - repeated envoy.config.filter.accesslog.v2.AccessLog access_log = 13; + repeated accesslog.v2.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -279,11 +368,6 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. uint32 xff_num_trusted_hops = 19; - message InternalAddressConfig { - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more @@ -316,61 +400,10 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - }; - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum.defined_only = true]; - - // [#comment:next free field: 7] - message SetCurrentClientCertDetails { - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - reserved 2; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. 
- bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - }; + [(validate.rules).enum = {defined_only: true}]; // This field is valid only when :ref:`forward_client_cert_details // ` @@ -402,38 +435,8 @@ message HttpConnectionManager { // [#not-implemented-hide:] bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. - string upgrade_type = 1; - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - }; repeated UpgradeConfig upgrade_configs = 23; - reserved 27; - // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? This affects the upstream *:path* header // as well. For paths that fail this check, Envoy will respond with 400 to @@ -457,25 +460,22 @@ message HttpConnectionManager { message Rds { // Configuration source specifier for RDS. - envoy.api.v2.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // The name of the route configuration. This name will be passed to the RDS // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. - string route_config_name = 2 [(validate.rules).string.min_bytes = 1]; + string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; } // This message is used to work around the limitations with 'oneof' and repeated fields. message ScopedRouteConfigurationsList { - repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated .min_items = 1]; + repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated = {min_items: 1}]; } message ScopedRoutes { - // The name assigned to the scoped routing configuration. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These // keys are matched against a set of :ref:`Key` // objects assembled from :ref:`ScopedRouteConfiguration` @@ -506,15 +506,6 @@ message ScopedRoutes { // // Each 'a=b' key-value pair constitutes an 'element' of the header field. message HeaderValueExtractor { - // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - // Specifies a header field's key value pair to match on. message KvElement { // The separator between key and value (e.g., '=' separates 'k=v;...'). @@ -522,12 +513,21 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string.min_bytes = 1]; + string separator = 1 [(validate.rules).string = {min_bytes: 1}]; // The key to match on. - string key = 2 [(validate.rules).string.min_bytes = 1]; + string key = 2 [(validate.rules).string = {min_bytes: 1}]; } + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. + string element_separator = 2; + oneof extract_type { // Specifies the zero based index of the element to extract. // Note Envoy concatenates multiple values of the same header key into a comma separated @@ -548,16 +548,19 @@ message ScopedRoutes { } // The final scope key consists of the ordered union of these fragments. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated .min_items = 1]; + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message.required = true]; + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; // Configuration source specifier for RDS. // This config source is used to subscribe to RouteConfiguration resources specified in // ScopedRouteConfiguration messages. - envoy.api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message.required = true]; + api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; oneof config_specifier { option (validate.required) = true; @@ -580,14 +583,16 @@ message ScopedRoutes { message ScopedRds { // Configuration source specifier for scoped RDS. 
- envoy.api.v2.core.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message.required = true]; + api.v2.core.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message = {required: true}]; } message HttpFilter { + reserved 3; + // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. @@ -596,6 +601,4 @@ message HttpFilter { google.protobuf.Any typed_config = 4; } - - reserved 3; } diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto index 46ef44c96b94..724c8a3b4c40 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -16,7 +16,7 @@ import "validate/validate.proto"; message MongoProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The optional path to use for writing Mongo access logs. If not access log // path is specified no access logs will be written. Note that access log is @@ -27,7 +27,7 @@ message MongoProxy { // applied to the following MongoDB operations: Query, Insert, GetMore, // and KillCursors. Once an active delay is in progress, all incoming // data up until the timer event fires will be a part of the delay. - envoy.config.filter.fault.v2.FaultDelay delay = 3; + fault.v2.FaultDelay delay = 3; // Flag to specify whether :ref:`dynamic metadata // ` should be emitted. Defaults to false. diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index f5d484fac1ba..9778f02bc342 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -18,14 +18,14 @@ import "validate/validate.proto"; message RateLimit { // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string.min_bytes = 1]; + string domain = 2 [(validate.rules).string = {min_bytes: 1}]; // The rate limit descriptor list to use in the rate limit service request. - repeated envoy.api.v2.ratelimit.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated .min_items = 1]; + repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3 + [(validate.rules).repeated = {min_items: 1}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. @@ -40,6 +40,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. 
- envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message.required = true]; + ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/network/rbac/v2/rbac.proto b/api/envoy/config/filter/network/rbac/v2/rbac.proto index c192b888e559..ea24eb50f431 100644 --- a/api/envoy/config/filter/network/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v2/rbac.proto @@ -18,6 +18,17 @@ import "validate/validate.proto"; // Header should not be used in rules/shadow_rules in RBAC network filter as // this information is only available in :ref:`RBAC http filter `. message RBAC { + enum EnforcementType { + // Apply RBAC policies when the first byte of data arrives on the connection. + ONE_TIME_ON_FIRST_BYTE = 0; + + // Continuously apply RBAC policies as data arrives. Use this mode when + // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, + // etc. when the protocol decoders emit dynamic metadata such as the + // resources being accessed and the operations on the resources. + CONTINUOUS = 1; + } + // Specify the RBAC rules to be applied globally. // If absent, no enforcing RBAC policy will be applied. config.rbac.v2.RBAC rules = 1; @@ -28,18 +39,7 @@ message RBAC { config.rbac.v2.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string.min_bytes = 1]; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. - CONTINUOUS = 1; - }; + string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 78c56bb2efe6..6a9fa5c2ad53 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -17,29 +17,39 @@ import "validate/validate.proto"; // Redis Proxy :ref:`configuration overview `. message RedisProxy { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // Name of cluster from cluster manager. See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string cluster = 2 [deprecated = true]; - // Redis connection pool settings. message ConnPoolSettings { + // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently + // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data + // because replication is asynchronous and requires some delay. You need to ensure that your + // application can tolerate stale data. + enum ReadPolicy { + // Default mode. Read from the current master node. 
+ MASTER = 0; + + // Read from the master, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1; + + // Read from replica nodes. If multiple replica nodes are present within a shard, a random + // node is selected. Healthy nodes have precedent over unhealthy nodes. + REPLICA = 2; + + // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not + // present or unhealthy), read from the master. + PREFER_REPLICA = 3; + + // Read from any node of the cluster. A random node is selected among the master and replicas, + // healthy nodes have precedent over unhealthy nodes. + ANY = 4; + } + // Per-operation timeout in milliseconds. The timer starts when the first // command of a pipeline is written to the backend connection. Each response received from Redis // resets the timer since it signifies that the next command is being processed by the backend. // The only exception to this behavior is when a connection to a backend is not yet established. // In that case, the connect timeout on the cluster will govern the timeout until the connection // is ready. - google.protobuf.Duration op_timeout = 1 [(validate.rules).duration.required = true]; + google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be // forwarded to the same upstream. The hash key used for determining the upstream in a @@ -92,49 +102,12 @@ message RedisProxy { // count. bool enable_command_stats = 8; - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. - enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. - PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } - // Read policy. The default is to read from the master. - ReadPolicy read_policy = 7 [(validate.rules).enum.defined_only = true]; + ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. - bool latency_in_micros = 4; - message PrefixRoutes { message Route { - // String prefix that must match the beginning of the keys. Envoy will always favor the - // longest match. - string prefix = 1; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. 
- string cluster = 3 [(validate.rules).string.min_bytes = 1]; - // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are @@ -142,7 +115,7 @@ message RedisProxy { message RequestMirrorPolicy { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. @@ -156,13 +129,23 @@ message RedisProxy { // is an integral percentage out of 100. For instance, a runtime key lookup returning the // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is // HUNDRED. - envoy.api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; + api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; // Set this to TRUE to only mirror write commands, this is effectively replicating the // writes in a "fire and forget" manner. bool exclude_read_commands = 3; } + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; + // Indicates that the route has a request mirroring policy. repeated RequestMirrorPolicy request_mirror_policy = 4; } @@ -187,6 +170,27 @@ message RedisProxy { Route catch_all_route = 4; } + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Name of cluster from cluster manager. See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing cluster. + // + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch_all + // route` + // instead. + string cluster = 2 [deprecated = true]; + + // Network settings for the connection pool to the upstream clusters. + ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; + + // Indicates that latency stat should be computed in microseconds. By default it is computed in + // milliseconds. + bool latency_in_micros = 4; + // List of **unique** prefixes used to separate keys from different workloads to different // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all // cluster can be used to forward commands when there is no match. Time complexity of the @@ -224,7 +228,7 @@ message RedisProxy { // password is set, then a "NOAUTH Authentication required." error response will be sent to the // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. - envoy.api.v2.core.DataSource downstream_auth_password = 6; + api.v2.core.DataSource downstream_auth_password = 6; } // RedisProtocolOptions specifies Redis upstream protocol options. 
This object is used in @@ -233,5 +237,5 @@ message RedisProxy { message RedisProtocolOptions { // Upstream server password as defined by the `requirepass directive // `_ in the server's configuration file. - envoy.api.v2.core.DataSource auth_password = 1; + api.v2.core.DataSource auth_password = 1; } diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 8e4453dd9f7d..0ac01842159f 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -6,9 +6,9 @@ option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -19,49 +19,6 @@ import "validate/validate.proto"; // TCP Proxy :ref:`configuration overview `. message TcpProxy { - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - // - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - envoy.api.v2.core.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, connections will never be closed - // by the TCP proxy due to being idle. - google.protobuf.Duration idle_timeout = 8 [(validate.rules).duration.gt = {}]; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated envoy.config.filter.accesslog.v2.AccessLog access_log = 5; - // [#not-implemented-hide:] Deprecated. // TCP Proxy filter configuration using V1 format. message DeprecatedV1 { @@ -75,7 +32,7 @@ message TcpProxy { message TCPRoute { // The cluster to connect to when a the downstream network connection // matches the specified criteria. 
- string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // An optional list of IP address subnets in the form // “ip_address/xx”. The criteria is satisfied if the destination IP @@ -85,7 +42,7 @@ message TcpProxy { // address of the downstream connection might be different from the // addresses on which the proxy is listening if the connection has been // redirected. - repeated envoy.api.v2.core.CidrRange destination_ip_list = 2; + repeated api.v2.core.CidrRange destination_ip_list = 2; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the destination port of the @@ -101,7 +58,7 @@ message TcpProxy { // of the downstream connection is contained in at least one of the // specified subnets. If the parameter is not specified or the list is // empty, the source IP address is ignored. - repeated envoy.api.v2.core.CidrRange source_ip_list = 4; + repeated api.v2.core.CidrRange source_ip_list = 4; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the source port of the @@ -113,31 +70,74 @@ message TcpProxy { // The route table for the filter. All filter instances must have a route // table, even if it is empty. - repeated TCPRoute routes = 1 [(validate.rules).repeated .min_items = 1]; + repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; } - // [#not-implemented-hide:] Deprecated. - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32.gte = 1]; - // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32.gte = 1]; + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } + + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + // + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. 
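// Illustrative aside (editor's sketch, not part of the generated patch): a
// second recurring change in these hunks drops the redundant leading `envoy.`
// from type references. Protobuf resolves relative type names from the
// innermost enclosing package outward, so from inside any `envoy.*` package
// both spellings below denote the same fully qualified type,
// .envoy.api.v2.core.CidrRange, and the generated descriptors are identical:
//
//   // Inside envoy.config.filter.network.tcp_proxy.v2 (TCPRoute, shown above):
//   repeated envoy.api.v2.core.CidrRange source_ip_list = 4;  // before
//   repeated api.v2.core.CidrRange source_ip_list = 4;        // after
//
// Only the textual reference is shortened; field numbers and the wire format
// are untouched.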
+ api.v2.core.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, connections will never be closed + // by the TCP proxy due to being idle. + google.protobuf.Duration idle_timeout = 8 [(validate.rules).duration = {gt {}}]; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. + repeated accesslog.v2.AccessLog access_log = 5; + + // [#not-implemented-hide:] Deprecated. + DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; } diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto index 33d120047159..0f67a20c2180 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto @@ -29,10 +29,10 @@ message RouteConfiguration { // [#comment:next free field: 3] message Route { // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message.required = true]; + RouteAction route = 2 [(validate.rules).message = {required: true}]; } // [#comment:next free field: 5] @@ -69,7 +69,7 @@ message RouteMatch { // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). Note that this only applies for Thrift transports and/or // protocols that support headers. - repeated envoy.api.v2.route.HeaderMatcher headers = 4; + repeated api.v2.route.HeaderMatcher headers = 4; } // [#comment:next free field: 5] @@ -79,7 +79,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -93,12 +93,12 @@ message RouteAction { // `, // with values there taking precedence. Keys and values should be provided under the "envoy.lb" // metadata key. - envoy.api.v2.core.Metadata metadata_match = 3; + api.v2.core.Metadata metadata_match = 3; // Specifies a set of rate limit configurations that could be applied to the route. // N.B. 
Thrift service or method name matching can be achieved by specifying a RequestHeaders // action with the header name ":method-name". - repeated envoy.api.v2.route.RateLimit rate_limits = 4; + repeated api.v2.route.RateLimit rate_limits = 4; } // Allows for specification of multiple upstream clusters along with weights that indicate the @@ -107,12 +107,12 @@ message RouteAction { message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field, combined with what's @@ -120,9 +120,9 @@ message WeightedCluster { // `, // will be considered. Values here will take precedence. Keys and values should be provided // under the "envoy.lb" metadata key. - envoy.api.v2.core.Metadata metadata_match = 3; + api.v2.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index 823a1747527b..e925cfe697d8 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -16,32 +16,8 @@ import "validate/validate.proto"; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. -// [#comment:next free field: 6] -message ThriftProxy { - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum.defined_only = true]; - - // Supplies the type of protocol that the Thrift proxy should use. Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum.defined_only = true]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - repeated ThriftFilter thrift_filters = 5; -} - // Thrift transport types supported by Envoy. enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. // For upstream connections, the Thrift proxy will use same transport as the downstream // connection. @@ -59,7 +35,6 @@ enum TransportType { // Thrift Protocol types supported by Envoy. 
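// Illustrative aside (editor's sketch, not part of the generated patch): the
// large add/remove blocks in this file and in several files above are
// reordering rather than removal. protoxform appears to emit `reserved`
// statements and nested enums/messages ahead of a message's fields, and in
// this file it places the ThriftProxy message after the enums it references;
// field numbers, types and comments are carried over unchanged (modulo the
// option spelling noted earlier). A minimal sketch with a hypothetical
// message:
//
//   // Hand-written ordering before the reformat:
//   message Example {
//     string name = 1;
//     reserved 3;
//     enum Mode {
//       DEFAULT = 0;
//     }
//   }
//
//   // Ordering after the reformat (reserved ranges and nested types first):
//   message Example {
//     reserved 3;
//
//     enum Mode {
//       DEFAULT = 0;
//     }
//
//     string name = 1;
//   }
//
// A move-aware diff (for example `git diff --color-moved`) makes it easy to
// confirm that such hunks only shuffle declarations.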
enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol // detection. For upstream connections, the Thrift proxy will use the same protocol as the @@ -79,6 +54,29 @@ enum ProtocolType { TWITTER = 4; } +// [#comment:next free field: 6] +message ThriftProxy { + // Supplies the type of transport that the Thrift proxy should use. Defaults to + // :ref:`AUTO_TRANSPORT`. + TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. + repeated ThriftFilter thrift_filters = 5; +} + // ThriftFilter configures a Thrift filter. // [#comment:next free field: 3] message ThriftFilter { @@ -88,7 +86,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. @@ -108,11 +106,11 @@ message ThriftProtocolOptions { // Selecting // :ref:`AUTO_TRANSPORT`, // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum.defined_only = true]; + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_PROTOCOL`, // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum.defined_only = true]; + ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index ff2463b26c6c..5cc681ff6010 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ -18,7 +18,7 @@ import "validate/validate.proto"; // [#comment:next free field: 5] message RateLimit { // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string.min_bytes = 1]; + string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the @@ -29,7 +29,7 @@ message RateLimit { // .. 
note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. @@ -44,6 +44,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message.required = true]; + ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto index e7a7bf94cce6..154572901a7c 100644 --- a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto @@ -1,23 +1,23 @@ syntax = "proto3"; -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin - package envoy.config.grpc_credential.v2alpha; option java_outer_classname = "AwsIamProto"; -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; import "validate/validate.proto"; +// [#protodoc-title: Grpc Credentials AWS IAM] +// Configuration for AWS IAM Grpc Credentials Plugin + message AwsIamConfig { // The `service namespace // `_ // of the Grpc endpoint. // // Example: appmesh - string service_name = 1 [(validate.rules).string.min_bytes = 1]; + string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The `region `_ hosting the Grpc // endpoint. 
If unspecified, the extension will use the value in the ``AWS_REGION`` environment diff --git a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto index 1746492fe261..cd9f27d71e45 100644 --- a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto @@ -1,8 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin - package envoy.config.grpc_credential.v2alpha; option java_outer_classname = "FileBasedMetadataProto"; @@ -11,11 +8,13 @@ option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; import "envoy/api/v2/core/base.proto"; -message FileBasedMetadataConfig { +// [#protodoc-title: Grpc Credentials File Based Metadata] +// Configuration for File Based Metadata Grpc Credentials Plugin +message FileBasedMetadataConfig { // Location or inline data of secret to use for authentication of the Google gRPC connection // this secret will be attached to a header of the gRPC connection - envoy.api.v2.core.DataSource secret_data = 1; + api.v2.core.DataSource secret_data = 1; // Metadata header key to use for sending the secret data // if no header key is set, "authorization" header will be used diff --git a/api/envoy/config/metrics/v2/metrics_service.proto b/api/envoy/config/metrics/v2/metrics_service.proto index 208a5c2076ed..da53e5a52fdc 100644 --- a/api/envoy/config/metrics/v2/metrics_service.proto +++ b/api/envoy/config/metrics/v2/metrics_service.proto @@ -1,7 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Metrics service] - package envoy.config.metrics.v2; option java_outer_classname = "MetricsServiceProto"; @@ -12,10 +10,12 @@ import "envoy/api/v2/core/grpc_service.proto"; import "validate/validate.proto"; +// [#protodoc-title: Metrics service] + // Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. - envoy.api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index fea8b9b0f878..e82f90484cb2 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -1,6 +1,3 @@ -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - syntax = "proto3"; package envoy.config.metrics.v2; @@ -18,6 +15,9 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + // Configuration for pluggable stats sinks. message StatsSink { // The name of the stats sink to instantiate. The name must match a supported @@ -149,12 +149,12 @@ message StatsMatcher { // Exclusive match. All stats are enabled except for those matching one of the supplied // StringMatcher protos. - envoy.type.matcher.ListStringMatcher exclusion_list = 2; + type.matcher.ListStringMatcher exclusion_list = 2; // Inclusive match. No stats are enabled except for those matching one of the supplied // StringMatcher protos. 
- envoy.type.matcher.ListStringMatcher inclusion_list = 3; - }; + type.matcher.ListStringMatcher inclusion_list = 3; + } } // Designates a tag name and value pair. The value may be either a fixed value @@ -231,7 +231,7 @@ message TagSpecifier { // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag // ``envoy.http_conn_manager_prefix`` will be added with the tag value // ``connection_manager_1``. - string regex = 2 [(validate.rules).string.max_bytes = 1024]; + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; // Specifies a fixed tag value for the ``tag_name``. string fixed_value = 3; @@ -247,13 +247,14 @@ message StatsdSink { // The UDP address of a running `statsd `_ // compliant listener. If specified, statistics will be flushed to this // address. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. string tcp_cluster_name = 2; } + // Optional custom prefix for StatsdSink. If // specified, this will override the default prefix. // For example: @@ -289,16 +290,16 @@ message StatsdSink { // `. // [#comment:next free field: 3] message DogStatsdSink { + reserved 2; + oneof dog_statsd_specifier { option (validate.required) = true; // The UDP address of a running DogStatsD compliant listener. If specified, // statistics will be flushed to this address. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; } - reserved 2; - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto index e32764675cb5..dad27524bdb0 100644 --- a/api/envoy/config/overload/v2alpha/overload.proto +++ b/api/envoy/config/overload/v2alpha/overload.proto @@ -27,7 +27,7 @@ message ResourceMonitor { // ` // * :ref:`envoy.resource_monitors.injected_resource // ` - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Configuration for the resource monitor being instantiated. oneof config_type { @@ -40,15 +40,16 @@ message ResourceMonitor { message ThresholdTrigger { // If the resource pressure is greater than or equal to this value, the trigger // will fire. - double value = 1 [(validate.rules).double = {gte: 0, lte: 1}]; + double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; } message Trigger { // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof trigger_oneof { option (validate.required) = true; + ThresholdTrigger threshold = 2; } } @@ -57,12 +58,12 @@ message OverloadAction { // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A set of triggers for this action. If any of these triggers fire the overload action // is activated. Listeners are notified when the overload action transitions from // inactivated to activated, or vice versa. 
- repeated Trigger triggers = 2 [(validate.rules).repeated .min_items = 1]; + repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; } message OverloadManager { @@ -70,7 +71,7 @@ message OverloadManager { google.protobuf.Duration refresh_interval = 1; // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated .min_items = 1]; + repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; // The set of overload actions. repeated OverloadAction actions = 3; diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto index 55577d4ab013..184b3ec081e1 100644 --- a/api/envoy/config/ratelimit/v2/rls.proto +++ b/api/envoy/config/ratelimit/v2/rls.proto @@ -14,12 +14,10 @@ import "validate/validate.proto"; // Rate limit :ref:`configuration overview `. message RateLimitServiceConfig { - reserved 1; + reserved 1, 3; // Specifies the gRPC service that hosts the rate limit service. The client // will connect to this cluster when it needs to make rate limit service // requests. - envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; - - reserved 3; + api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/rbac/v2/rbac.proto b/api/envoy/config/rbac/v2/rbac.proto index 1d797d418681..e29e1a1b30d0 100644 --- a/api/envoy/config/rbac/v2/rbac.proto +++ b/api/envoy/config/rbac/v2/rbac.proto @@ -1,6 +1,11 @@ syntax = "proto3"; -import "validate/validate.proto"; +package envoy.config.rbac.v2; + +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.rbac.v2"; + import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/route/route.proto"; import "envoy/type/matcher/metadata.proto"; @@ -8,11 +13,7 @@ import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; -package envoy.config.rbac.v2; - -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.config.rbac.v2"; +import "validate/validate.proto"; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -84,12 +85,12 @@ message Policy { // Required. The set of permissions that define a role. Each permission is matched with OR // semantics. To match all actions for this policy, a single Permission with the `any` field set // to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated .min_items = 1]; + repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; // Required. The set of principals that are assigned/denied the role based on “action”. Each // principal is matched with OR semantics. To match all downstreams for this policy, a single // Principal with the `any` field set to true should be used. - repeated Principal principals = 2 [(validate.rules).repeated .min_items = 1]; + repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined @@ -99,11 +100,10 @@ message Policy { // Permission defines an action (or actions) that a principal can take. message Permission { - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. 
message Set { - repeated Permission rules = 1 [(validate.rules).repeated .min_items = 1]; + repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; } oneof rule { @@ -116,20 +116,20 @@ message Permission { Set or_rules = 2; // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool.const = true]; + bool any = 3 [(validate.rules).bool = {const: true}]; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - envoy.api.v2.route.HeaderMatcher header = 4; + api.v2.route.HeaderMatcher header = 4; // A CIDR block that describes the destination IP. - envoy.api.v2.core.CidrRange destination_ip = 5; + api.v2.core.CidrRange destination_ip = 5; // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32.lte = 65535]; + uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - envoy.type.matcher.MetadataMatcher metadata = 7; + type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of `not_rule` would // match, this permission would not match. Conversely, if the value of `not_rule` would not @@ -155,28 +155,26 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - envoy.type.matcher.StringMatcher requested_server_name = 9; + type.matcher.StringMatcher requested_server_name = 9; } } // Principal defines an identity or a group of identities for a downstream subject. message Principal { - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. message Set { - repeated Principal ids = 1 [(validate.rules).repeated .min_items = 1]; + repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; } // Authentication attributes for a downstream. message Authenticated { reserved 1; - reserved "name"; // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the // certificate, otherwise the subject field is used. If unset, it applies to any user that is // authenticated. - envoy.type.matcher.StringMatcher principal_name = 2; + type.matcher.StringMatcher principal_name = 2; } oneof identifier { @@ -189,20 +187,20 @@ message Principal { Set or_ids = 2; // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool.const = true]; + bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. - envoy.api.v2.core.CidrRange source_ip = 5; + api.v2.core.CidrRange source_ip = 5; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - envoy.api.v2.route.HeaderMatcher header = 6; + api.v2.route.HeaderMatcher header = 6; // Metadata that describes additional information about the principal. - envoy.type.matcher.MetadataMatcher metadata = 7; + type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of `not_id` would match, // this principal would not match. 
Conversely, if the value of `not_id` would not match, this diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto index 110123e3c332..ad3d1ab1eada 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto @@ -14,5 +14,5 @@ import "validate/validate.proto"; // fraction of currently reserved heap memory divided by a statically configured maximum // specified in the FixedHeapConfig. message FixedHeapConfig { - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64.gt = 0]; + uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; } diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto index 64c984fa0cb3..2a8a1a43150c 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto @@ -15,5 +15,5 @@ import "validate/validate.proto"; // the resource pressure and be updated atomically by a symbolic link swap. // This is intended primarily for integration tests to force Envoy into an overloaded state. message InjectedResourceConfig { - string filename = 1 [(validate.rules).string.min_bytes = 1]; + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 7b4df8bf5258..f4b1d8391008 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -1,6 +1,3 @@ -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - syntax = "proto3"; package envoy.config.trace.v2; @@ -10,15 +7,18 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.trace.v2"; import "envoy/api/v2/core/grpc_service.proto"; -import "opencensus/proto/trace/v1/trace_config.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; - import "google/protobuf/wrappers.proto"; +import "opencensus/proto/trace/v1/trace_config.proto"; + import "validate/validate.proto"; +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + // The tracing configuration specifies global // settings for the HTTP tracer used by Envoy. The configuration is defined by // the :ref:`Bootstrap ` :ref:`tracing @@ -34,7 +34,7 @@ message Tracing { // - *envoy.dynamic.ot* // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: @@ -50,6 +50,7 @@ message Tracing { google.protobuf.Any typed_config = 3; } } + // Provides configuration for the HTTP tracer. Http http = 1; } @@ -57,33 +58,15 @@ message Tracing { // Configuration for the LightStep tracer. message LightstepConfig { // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. 
- string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; + string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; } // Configuration for the Zipkin tracer. message ZipkinConfig { - // The cluster manager cluster that hosts the Zipkin collectors. Note that the - // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster - // resources `. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string.min_bytes = 1]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. - google.protobuf.BoolValue shared_span_context = 4; - // Available Zipkin collector endpoint versions. enum CollectorEndpointVersion { // Zipkin API v1, JSON over HTTP. @@ -93,7 +76,7 @@ message ZipkinConfig { // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - HTTP_JSON_V1 = 0 [deprecated = true]; + HTTP_JSON_V1 = 0; // Zipkin API v2, JSON over HTTP. HTTP_JSON = 1; @@ -105,6 +88,24 @@ message ZipkinConfig { GRPC = 3; } + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. + string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + bool trace_id_128bit = 3; + + // Determines whether client and server spans will share the same span context. + // The default value is true. + google.protobuf.BoolValue shared_span_context = 4; + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be // used. CollectorEndpointVersion collector_endpoint_version = 5; @@ -116,7 +117,7 @@ message ZipkinConfig { message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. - string library = 1 [(validate.rules).string.min_bytes = 1]; + string library = 1 [(validate.rules).string = {min_bytes: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. @@ -126,14 +127,34 @@ message DynamicOtConfig { // Configuration for the Datadog tracer. message DatadogConfig { // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + // The name used for the service when traces are generated by envoy. 
- string service_name = 2 [(validate.rules).string.min_bytes = 1]; + string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; } // Configuration for the OpenCensus tracer. // [#proto-status: experimental] message OpenCensusConfig { + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + // Configures tracing, e.g. the sampler, max number of annotations, etc. opencensus.proto.trace.v1.TraceConfig trace_config = 1; @@ -169,25 +190,6 @@ message OpenCensusConfig { // format: https://github.com/grpc/grpc/blob/master/doc/naming.md string ocagent_address = 12; - reserved 7; - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - // List of incoming trace context headers we will accept. First one found // wins. repeated TraceContext incoming_trace_context = 8; @@ -199,5 +201,5 @@ message OpenCensusConfig { // Configuration structure. message TraceServiceConfig { // The upstream gRPC cluster that hosts the metrics service. - envoy.api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto index e68b40dae530..ffb121fcb061 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto @@ -6,20 +6,20 @@ option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; -// [#protodoc-title: Tap] - -import "envoy/config/common/tap/v2alpha/common.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/common/tap/v2alpha/common.proto"; import "validate/validate.proto"; +// [#protodoc-title: Tap] + // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. message Tap { // Common configuration for the tap transport socket. common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; // The underlying transport socket being wrapped. 
- api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message.required = true]; + api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/data/accesslog/v2/accesslog.proto b/api/envoy/data/accesslog/v2/accesslog.proto index 8aa38f47a36f..0daebb6390e7 100644 --- a/api/envoy/data/accesslog/v2/accesslog.proto +++ b/api/envoy/data/accesslog/v2/accesslog.proto @@ -12,6 +12,7 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: gRPC access logs] @@ -34,16 +35,20 @@ message TCPAccessLogEntry { } message HTTPAccessLogEntry { - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - // HTTP version enum HTTPVersion { PROTOCOL_UNSPECIFIED = 0; + HTTP10 = 1; + HTTP11 = 2; + HTTP2 = 3; } + + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; + HTTPVersion protocol_version = 2; // Description of the incoming HTTP request. @@ -67,15 +72,15 @@ message AccessLogCommon { // [#not-implemented-hide:] // This field indicates the rate at which this log entry was sampled. // Valid range is (0.0, 1.0]. - double sample_rate = 1 [(validate.rules).double.gt = 0.0, (validate.rules).double.lte = 1.0]; + double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; // This field is the remote/origin address on which the request from the user was received. // Note: This may not be the physical peer. E.g, if the remote address is inferred from for // example the x-forwarder-for header, proxy protocol, etc. - envoy.api.v2.core.Address downstream_remote_address = 2; + api.v2.core.Address downstream_remote_address = 2; // This field is the local/destination address on which the request from the user was received. - envoy.api.v2.core.Address downstream_local_address = 3; + api.v2.core.Address downstream_local_address = 3; // If the connection is secure,S this field will contain TLS properties. TLSProperties tls_properties = 4; @@ -124,10 +129,10 @@ message AccessLogCommon { // The upstream remote/destination address that handles this exchange. This does not include // retries. - envoy.api.v2.core.Address upstream_remote_address = 13; + api.v2.core.Address upstream_remote_address = 13; // The upstream local/origin address that handles this exchange. This does not include retries. - envoy.api.v2.core.Address upstream_local_address = 14; + api.v2.core.Address upstream_local_address = 14; // The upstream cluster that *upstream_remote_address* belongs to. string upstream_cluster = 15; @@ -143,7 +148,7 @@ message AccessLogCommon { // route created from a higher level forwarding rule with some ID can place // that ID in this field and cross reference later. It can also be used to // determine if a canary endpoint was used or not. - envoy.api.v2.core.Metadata metadata = 17; + api.v2.core.Metadata metadata = 17; // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. The format of this field depends on the configured @@ -157,11 +162,23 @@ message AccessLogCommon { // This field is the downstream direct remote address on which the request from the user was // received. Note: This is always the physical peer, even if the remote address is inferred from // for example the x-forwarder-for header, proxy protocol, etc. 
- envoy.api.v2.core.Address downstream_direct_remote_address = 20; + api.v2.core.Address downstream_direct_remote_address = 20; } // Flags indicating occurrences during request/response processing. message ResponseFlags { + message Unauthorized { + // Reasons why the request was unauthorized + enum Reason { + REASON_UNSPECIFIED = 0; + + // The request was denied by the external authorization service. + EXTERNAL_SERVICE = 1; + } + + Reason reason = 1; + } + // Indicates local server healthcheck failed. bool failed_local_healthcheck = 1; @@ -198,17 +215,6 @@ message ResponseFlags { // Indicates that the request was rate-limited locally. bool rate_limited = 12; - message Unauthorized { - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - // The request was denied by the external authorization service. - EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - // Indicates if the request was deemed unauthorized and the reason for it. Unauthorized unauthorized_details = 13; @@ -233,28 +239,21 @@ message ResponseFlags { message TLSProperties { enum TLSVersion { VERSION_UNSPECIFIED = 0; + TLSv1 = 1; + TLSv1_1 = 2; + TLSv1_2 = 3; + TLSv1_3 = 4; } - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; message CertificateProperties { message SubjectAltName { oneof san { string uri = 1; + // [#not-implemented-hide:] string dns = 2; } @@ -267,6 +266,19 @@ message TLSProperties { string subject = 2; } + // Version of TLS that was negotiated. + TLSVersion tls_version = 1; + + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + // + // Here it is expressed as an integer. + google.protobuf.UInt32Value tls_cipher_suite = 2; + + // SNI hostname from handshake. + string tls_sni_hostname = 3; + // Properties of the local certificate used to negotiate TLS. CertificateProperties local_certificate_properties = 4; @@ -281,7 +293,7 @@ message HTTPRequestProperties { // The request method (RFC 7231/2616). // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] - envoy.api.v2.core.RequestMethod request_method = 1; + api.v2.core.RequestMethod request_method = 1; // The scheme portion of the incoming request URI. string scheme = 2; diff --git a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto index 1273f84d6df2..9ce85ce33354 100644 --- a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto +++ b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto @@ -14,35 +14,6 @@ import "validate/validate.proto"; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. -message OutlierDetectionEvent { - // In case of eject represents type of ejection that took place. - OutlierEjectionType type = 1 [(validate.rules).enum.defined_only = true]; - // Timestamp for event. 
- google.protobuf.Timestamp timestamp = 2; - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string.min_bytes = 1]; - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string.min_bytes = 1]; - // The action that took place. - Action action = 6 [(validate.rules).enum.defined_only = true]; - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. - bool enforced = 8; - - oneof event { - option (validate.required) = true; - OutlierEjectSuccessRate eject_success_rate_event = 9; - OutlierEjectConsecutive eject_consecutive_event = 10; - OutlierEjectFailurePercentage eject_failure_percentage_event = 11; - } -} - // Type of ejection that took place enum OutlierEjectionType { // In case upstream host returns certain number of consecutive 5xx. @@ -52,8 +23,10 @@ enum OutlierEjectionType { // See :ref:`Cluster outlier detection ` documentation for // details. CONSECUTIVE_5XX = 0; + // In case upstream host returns certain number of consecutive gateway errors CONSECUTIVE_GATEWAY_FAILURE = 1; + // Runs over aggregated success rate statistics from every host in cluster // and selects hosts for which ratio of successful replies deviates from other hosts // in the cluster. @@ -63,12 +36,14 @@ enum OutlierEjectionType { // statistics. See :ref:`Cluster outlier detection ` // documentation for details. SUCCESS_RATE = 2; + // Consecutive local origin failures: Connection failures, resets, timeouts, etc // This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; + // Runs over aggregated success rate statistics for local origin failures // for all hosts in the cluster and selects hosts for which success rate deviates from other // hosts in the cluster. This type of ejection happens only when @@ -76,9 +51,11 @@ enum OutlierEjectionType { // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for SUCCESS_RATE_LOCAL_ORIGIN = 4; + // Runs over aggregated success rate statistics from every host in cluster and selects hosts for // which ratio of failed replies is above configured value. FAILURE_PERCENTAGE = 5; + // Runs over aggregated success rate statistics for local origin failures from every host in // cluster and selects hosts for which ratio of failed replies is above configured value. FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; @@ -88,18 +65,60 @@ enum OutlierEjectionType { enum Action { // In case host was excluded from service EJECT = 0; + // In case host was brought back into service UNEJECT = 1; } +message OutlierDetectionEvent { + // In case of eject represents type of ejection that took place. + OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; + + // Timestamp for event. 
+ google.protobuf.Timestamp timestamp = 2; + + // The time in seconds since the last action (either an ejection or unejection) took place. + google.protobuf.UInt64Value secs_since_last_action = 3; + + // The :ref:`cluster ` that owns the ejected host. + string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; + + // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. + string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; + + // The action that took place. + Action action = 6 [(validate.rules).enum = {defined_only: true}]; + + // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to + // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and + // then re-added). + uint32 num_ejections = 7; + + // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was + // ejected. ``false`` means the event was logged but the host was not actually ejected. + bool enforced = 8; + + oneof event { + option (validate.required) = true; + + OutlierEjectSuccessRate eject_success_rate_event = 9; + + OutlierEjectConsecutive eject_consecutive_event = 10; + + OutlierEjectFailurePercentage eject_failure_percentage_event = 11; + } +} + message OutlierEjectSuccessRate { // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32.lte = 100]; + uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; + // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32.lte = 100]; + uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; + // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32.lte = 100]; + uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; } message OutlierEjectConsecutive { @@ -107,5 +126,5 @@ message OutlierEjectConsecutive { message OutlierEjectFailurePercentage { // Host's success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32.lte = 100]; + uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; } diff --git a/api/envoy/data/core/v2alpha/health_check_event.proto b/api/envoy/data/core/v2alpha/health_check_event.proto index c5b2f70a5e24..454461b23ce3 100644 --- a/api/envoy/data/core/v2alpha/health_check_event.proto +++ b/api/envoy/data/core/v2alpha/health_check_event.proto @@ -15,10 +15,30 @@ import "validate/validate.proto"; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. 
+enum HealthCheckFailureType { + ACTIVE = 0; + + PASSIVE = 1; + + NETWORK = 2; +} + +enum HealthCheckerType { + HTTP = 0; + + TCP = 1; + + GRPC = 2; + + REDIS = 3; +} + message HealthCheckEvent { - HealthCheckerType health_checker_type = 1 [(validate.rules).enum.defined_only = true]; - envoy.api.v2.core.Address host = 2; - string cluster_name = 3 [(validate.rules).string.min_bytes = 1]; + HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; + + api.v2.core.Address host = 2; + + string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; oneof event { option (validate.required) = true; @@ -43,22 +63,9 @@ message HealthCheckEvent { google.protobuf.Timestamp timestamp = 6; } -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - message HealthCheckEjectUnhealthy { // The type of failure that caused this ejection. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; } message HealthCheckAddHealthy { @@ -70,7 +77,8 @@ message HealthCheckAddHealthy { message HealthCheckFailure { // The type of failure that caused this event. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; + // Whether this event is the result of the first ever health check on a host. bool first_check = 2; } diff --git a/api/envoy/data/tap/v2alpha/transport.proto b/api/envoy/data/tap/v2alpha/transport.proto index 9f3f79b4951f..a304ac3bf7a9 100644 --- a/api/envoy/data/tap/v2alpha/transport.proto +++ b/api/envoy/data/tap/v2alpha/transport.proto @@ -1,9 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - package envoy.data.tap.v2alpha; option java_outer_classname = "TransportProto"; @@ -15,20 +11,21 @@ import "envoy/data/tap/v2alpha/common.proto"; import "google/protobuf/timestamp.proto"; +// [#protodoc-title: Transport tap data] +// Trace format for the tap transport socket extension. This dumps plain text read/write +// sequences on a socket. + // Connection properties. message Connection { // Local address. - envoy.api.v2.core.Address local_address = 2; + api.v2.core.Address local_address = 2; // Remote address. - envoy.api.v2.core.Address remote_address = 3; + api.v2.core.Address remote_address = 3; } // Event in a socket trace. message SocketEvent { - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - // Data read by Envoy from the transport socket. message Read { // TODO(htuch): Half-close for reads. @@ -51,10 +48,15 @@ message SocketEvent { // TODO(mattklein123): Close event type. } + // Timestamp for event. + google.protobuf.Timestamp timestamp = 1; + // Read or write with content as bytes string. 
oneof event_selector { Read read = 2; + Write write = 3; + Closed closed = 4; } } diff --git a/api/envoy/data/tap/v2alpha/wrapper.proto b/api/envoy/data/tap/v2alpha/wrapper.proto index a49cd3189b4d..597b22f014df 100644 --- a/api/envoy/data/tap/v2alpha/wrapper.proto +++ b/api/envoy/data/tap/v2alpha/wrapper.proto @@ -1,16 +1,16 @@ syntax = "proto3"; -import "envoy/data/tap/v2alpha/http.proto"; -import "envoy/data/tap/v2alpha/transport.proto"; - -import "validate/validate.proto"; - package envoy.data.tap.v2alpha; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; +import "envoy/data/tap/v2alpha/http.proto"; +import "envoy/data/tap/v2alpha/transport.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Tap data wrappers] // Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for diff --git a/api/envoy/service/accesslog/v2/als.proto b/api/envoy/service/accesslog/v2/als.proto index c06199a2b208..e3022af83bc9 100644 --- a/api/envoy/service/accesslog/v2/als.proto +++ b/api/envoy/service/accesslog/v2/als.proto @@ -35,29 +35,29 @@ message StreamAccessLogsResponse { message StreamAccessLogsMessage { message Identifier { // The node sending the access log messages over the stream. - envoy.api.v2.core.Node node = 1 [(validate.rules).message.required = true]; + api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string.min_bytes = 1]; + string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; } - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - // Wrapper for batches of HTTP access log entries. message HTTPAccessLogEntries { - repeated envoy.data.accesslog.v2.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated .min_items = 1]; + repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1 + [(validate.rules).repeated = {min_items: 1}]; } // Wrapper for batches of TCP access log entries. message TCPAccessLogEntries { - repeated envoy.data.accesslog.v2.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated .min_items = 1]; + repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1 + [(validate.rules).repeated = {min_items: 1}]; } + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + // Batches of log entries of a single type. Generally speaking, a given stream should only // ever include one type of log entry. oneof log_entries { diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index b35e988d7b50..6e6986756af4 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -41,7 +41,7 @@ message AttributeContext { message Peer { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; // The canonical service name of the peer. 
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster @@ -144,5 +144,5 @@ message AttributeContext { map context_extensions = 10; // Dynamic metadata associated with the request. - envoy.api.v2.core.Metadata metadata_context = 11; + api.v2.core.Metadata metadata_context = 11; } diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index 8a3d4f1a629e..7b2144cf8b8c 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -8,10 +8,11 @@ option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; -import "envoy/type/http_status.proto"; import "envoy/service/auth/v2/attribute_context.proto"; +import "envoy/type/http_status.proto"; import "google/rpc/status.proto"; + import "validate/validate.proto"; // [#protodoc-title: Authorization Service ] @@ -24,7 +25,8 @@ import "validate/validate.proto"; service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse); + rpc Check(CheckRequest) returns (CheckResponse) { + } } message CheckRequest { @@ -36,11 +38,11 @@ message CheckRequest { message DeniedHttpResponse { // This field allows the authorization service to send a HTTP response status // code to the downstream client other than 403 (Forbidden). - envoy.type.HttpStatus status = 1 [(validate.rules).message.required = true]; + type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers // to the downstream client. - repeated envoy.api.v2.core.HeaderValueOption headers = 2; + repeated api.v2.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data // to the downstream client. @@ -55,7 +57,7 @@ message OkHttpResponse { // the filter will append the correspondent header value to the matched request header. Note that // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. - repeated envoy.api.v2.core.HeaderValueOption headers = 2; + repeated api.v2.core.HeaderValueOption headers = 2; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto index 45a7407f0c44..63b129069ede 100644 --- a/api/envoy/service/discovery/v2/ads.proto +++ b/api/envoy/service/discovery/v2/ads.proto @@ -9,11 +9,6 @@ option java_generic_services = true; import "envoy/api/v2/discovery.proto"; -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { -} - // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v2` for backwards // compatibility with existing management servers. New development in discovery @@ -27,11 +22,16 @@ message AdsDummy { // the multiplexed singleton APIs at the Envoy instance and management server. service AggregatedDiscoveryService { // This is a gRPC-only API. 
- rpc StreamAggregatedResources(stream envoy.api.v2.DiscoveryRequest) - returns (stream envoy.api.v2.DiscoveryResponse) { + rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest) + returns (stream api.v2.DiscoveryResponse) { } - rpc DeltaAggregatedResources(stream envoy.api.v2.DeltaDiscoveryRequest) - returns (stream envoy.api.v2.DeltaDiscoveryResponse) { + rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest) + returns (stream api.v2.DeltaDiscoveryResponse) { } } + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message AdsDummy { +} diff --git a/api/envoy/service/discovery/v2/rtds.proto b/api/envoy/service/discovery/v2/rtds.proto index c8b53d670fe1..4dfe6f2a6645 100644 --- a/api/envoy/service/discovery/v2/rtds.proto +++ b/api/envoy/service/discovery/v2/rtds.proto @@ -17,22 +17,16 @@ import "validate/validate.proto"; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { -} - // Discovery service for Runtime resources. service RuntimeDiscoveryService { - rpc StreamRuntime(stream envoy.api.v2.DiscoveryRequest) - returns (stream envoy.api.v2.DiscoveryResponse) { + rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { } - rpc DeltaRuntime(stream envoy.api.v2.DeltaDiscoveryRequest) - returns (stream envoy.api.v2.DeltaDiscoveryResponse) { + rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest) + returns (stream api.v2.DeltaDiscoveryResponse) { } - rpc FetchRuntime(envoy.api.v2.DiscoveryRequest) returns (envoy.api.v2.DiscoveryResponse) { + rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { option (google.api.http) = { post: "/v2/discovery:runtime" body: "*" @@ -40,10 +34,16 @@ service RuntimeDiscoveryService { } } +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message RtdsDummy { +} + // RTDS resource type. This describes a layer in the runtime virtual filesystem. message Runtime { // Runtime resource name. This makes the Runtime a self-describing xDS // resource. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + google.protobuf.Struct layer = 2; } diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto index 328bb547d630..ce52826a80b6 100644 --- a/api/envoy/service/ratelimit/v2/rls.proto +++ b/api/envoy/service/ratelimit/v2/rls.proto @@ -5,6 +5,7 @@ package envoy.service.ratelimit.v2; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; +option java_generic_services = true; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/ratelimit/ratelimit.proto"; @@ -34,7 +35,7 @@ message RateLimitRequest { // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is // processed by the service (see below). If any of the descriptors are over limit, the entire // request is considered to be over limit. 
- repeated envoy.api.v2.ratelimit.RateLimitDescriptor descriptors = 2; + repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2; // Rate limit requests can optionally specify the number of hits a request adds to the matched // limit. If the value is not set in the message, a request increases the matched limit by 1. @@ -46,8 +47,10 @@ message RateLimitResponse { enum Code { // The response code is not known. UNKNOWN = 0; + // The response code to notify that the number of requests are under limit. OK = 1; + // The response code to notify that the number of requests are over limit. OVER_LIMIT = 2; } @@ -57,18 +60,23 @@ message RateLimitResponse { enum Unit { // The time unit is not known. UNKNOWN = 0; + // The time unit representing a second. SECOND = 1; + // The time unit representing a minute. MINUTE = 2; + // The time unit representing an hour. HOUR = 3; + // The time unit representing a day. DAY = 4; } // The number of requests per unit of time. uint32 requests_per_unit = 1; + // The unit of time. Unit unit = 2; } @@ -76,8 +84,10 @@ message RateLimitResponse { message DescriptorStatus { // The response code for an individual descriptor. Code code = 1; + // The current limit as configured by the server. Useful for debugging, etc. RateLimit current_limit = 2; + // The limit remaining in the current time unit. uint32 limit_remaining = 3; } @@ -85,10 +95,12 @@ message RateLimitResponse { // The overall response code which takes into account all of the descriptors that were passed // in the RateLimitRequest message. Code overall_code = 1; + // A list of DescriptorStatus messages which matches the length of the descriptor list passed // in the RateLimitRequest. This can be used by the caller to determine which individual // descriptors failed and/or what the currently configured limits are for all of them. repeated DescriptorStatus statuses = 2; + // A list of headers to add to the response - repeated envoy.api.v2.core.HeaderValue headers = 3; + repeated api.v2.core.HeaderValue headers = 3; } diff --git a/api/envoy/service/tap/v2alpha/common.proto b/api/envoy/service/tap/v2alpha/common.proto index 4aa06568c85e..2ebf2ae17b6d 100644 --- a/api/envoy/service/tap/v2alpha/common.proto +++ b/api/envoy/service/tap/v2alpha/common.proto @@ -1,19 +1,19 @@ syntax = "proto3"; -import "envoy/api/v2/route/route.proto"; +package envoy.service.tap.v2alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; + import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; +import "envoy/api/v2/route/route.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -package envoy.service.tap.v2alpha; - -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; - // [#protodoc-title: Common tap configuration] // Tap configuration. @@ -22,11 +22,11 @@ message TapConfig { // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message.required = true]; + MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. 
- OutputConfig output_config = 2 [(validate.rules).message.required = true]; + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for // which the tap matching is enabled. When not enabled, the request\connection will not be @@ -36,7 +36,7 @@ message TapConfig { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v2.core.RuntimeFractionalPercent tap_enabled = 3; + api.v2.core.RuntimeFractionalPercent tap_enabled = 3; } // Tap match configuration. This is a recursive structure which allows complex nested match @@ -45,7 +45,7 @@ message MatchPredicate { // A set of match configurations used for logical operations. message MatchSet { // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated .min_items = 2]; + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { @@ -63,7 +63,7 @@ message MatchPredicate { MatchPredicate not_match = 3; // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool.const = true]; + bool any_match = 4 [(validate.rules).bool = {const: true}]; // HTTP request headers match configuration. HttpHeadersMatch http_request_headers_match = 5; @@ -89,7 +89,7 @@ message HttpHeadersMatch { message OutputConfig { // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}]; + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; // For buffered tapping, the maximum amount of received body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated @@ -153,7 +153,7 @@ message OutputSink { } // Sink output format. - Format format = 1 [(validate.rules).enum.defined_only = true]; + Format format = 1 [(validate.rules).enum = {defined_only: true}]; oneof output_sink_type { option (validate.required) = true; @@ -186,7 +186,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC @@ -196,5 +196,5 @@ message StreamingGrpcSink { string tap_id = 1; // The gRPC server that hosts the Tap Sink Service. 
- envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/type/http_status.proto b/api/envoy/type/http_status.proto index 3f89ada09776..acde59c49eb2 100644 --- a/api/envoy/type/http_status.proto +++ b/api/envoy/type/http_status.proto @@ -18,68 +18,118 @@ enum StatusCode { Continue = 100; OK = 200; + Created = 201; + Accepted = 202; + NonAuthoritativeInformation = 203; + NoContent = 204; + ResetContent = 205; + PartialContent = 206; + MultiStatus = 207; + AlreadyReported = 208; + IMUsed = 226; MultipleChoices = 300; + MovedPermanently = 301; + Found = 302; + SeeOther = 303; + NotModified = 304; + UseProxy = 305; + TemporaryRedirect = 307; + PermanentRedirect = 308; BadRequest = 400; + Unauthorized = 401; + PaymentRequired = 402; + Forbidden = 403; + NotFound = 404; + MethodNotAllowed = 405; + NotAcceptable = 406; + ProxyAuthenticationRequired = 407; + RequestTimeout = 408; + Conflict = 409; + Gone = 410; + LengthRequired = 411; + PreconditionFailed = 412; + PayloadTooLarge = 413; + URITooLong = 414; + UnsupportedMediaType = 415; + RangeNotSatisfiable = 416; + ExpectationFailed = 417; + MisdirectedRequest = 421; + UnprocessableEntity = 422; + Locked = 423; + FailedDependency = 424; + UpgradeRequired = 426; + PreconditionRequired = 428; + TooManyRequests = 429; + RequestHeaderFieldsTooLarge = 431; InternalServerError = 500; + NotImplemented = 501; + BadGateway = 502; + ServiceUnavailable = 503; + GatewayTimeout = 504; + HTTPVersionNotSupported = 505; + VariantAlsoNegotiates = 506; + InsufficientStorage = 507; + LoopDetected = 508; + NotExtended = 510; + NetworkAuthenticationRequired = 511; } // HTTP status. message HttpStatus { // Supplies HTTP response code. - StatusCode code = 1 - [(validate.rules).enum = {not_in: [0]}, (validate.rules).enum.defined_only = true]; + StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; } diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto index 56b69eae5968..d2936a5c4317 100644 --- a/api/envoy/type/matcher/metadata.proto +++ b/api/envoy/type/matcher/metadata.proto @@ -79,16 +79,16 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string.min_bytes = 1]; + string key = 1 [(validate.rules).string = {min_bytes: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string.min_bytes = 1]; + string filter = 1 [(validate.rules).string = {min_bytes: 1}]; // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated .min_items = 1]; + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The MetadataMatcher is matched if the value retrieved by path is matched to this value. - ValueMatcher value = 3 [(validate.rules).message.required = true]; + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/type/matcher/number.proto b/api/envoy/type/matcher/number.proto index 5c8cec7bcbdc..09eb811606aa 100644 --- a/api/envoy/type/matcher/number.proto +++ b/api/envoy/type/matcher/number.proto @@ -19,7 +19,7 @@ message DoubleMatcher { // If specified, the input double value must be in the range specified here. // Note: The range is using half-open interval semantics [start, end). 
- envoy.type.DoubleRange range = 1; + DoubleRange range = 1; // If specified, the input double value must be equal to the value specified here. double exact = 2; diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index cf6343c9ac51..98819364d9e2 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: RegexMatcher] @@ -28,9 +29,9 @@ message RegexMatcher { option (validate.required) = true; // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message.required = true]; + GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string.min_bytes = 1]; + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 874bfb528389..f926af343fd4 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -30,7 +30,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string.min_bytes = 1]; + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -38,7 +38,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string.min_bytes = 1]; + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; // The input string must match the regular expression specified here. // The regex grammar is defined `here @@ -53,14 +53,14 @@ message StringMatcher { // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. - string regex = 4 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex = 4 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message.required = true]; + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } } // Specifies a list of ways to match a string. message ListStringMatcher { - repeated StringMatcher patterns = 1 [(validate.rules).repeated .min_items = 1]; + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/type/percent.proto b/api/envoy/type/percent.proto index c577093eea0a..6d0868fd0ede 100644 --- a/api/envoy/type/percent.proto +++ b/api/envoy/type/percent.proto @@ -12,7 +12,7 @@ import "validate/validate.proto"; // Identifies a percentage, in the range [0.0, 100.0]. message Percent { - double value = 1 [(validate.rules).double = {gte: 0, lte: 100}]; + double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; } // A fractional percentage is used in cases in which for performance reasons performing floating @@ -22,9 +22,6 @@ message Percent { // * **Example**: 1/100 = 1%. // * **Example**: 3/10000 = 0.03%. message FractionalPercent { - // Specifies the numerator. Defaults to 0. 
- uint32 numerator = 1; - // Fraction percentages support several fixed denominator values. enum DenominatorType { // 100. @@ -43,7 +40,10 @@ message FractionalPercent { MILLION = 2; } + // Specifies the numerator. Defaults to 0. + uint32 numerator = 1; + // Specifies the denominator. If the denominator specified is less than the numerator, the final // fractional percentage is capped at 1 (100%). - DenominatorType denominator = 2 [(validate.rules).enum.defined_only = true]; + DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; } diff --git a/tools/api_proto_plugin/plugin.py b/tools/api_proto_plugin/plugin.py index 0a56ea8e3f8b..66223f9e1566 100644 --- a/tools/api_proto_plugin/plugin.py +++ b/tools/api_proto_plugin/plugin.py @@ -54,4 +54,9 @@ def Plugin(output_suffix, visitor): stats_file.name = file_proto.name + output_suffix + '.profile' ps.print_stats() stats_file.content = stats_stream.getvalue() + # Also include the original FileDescriptorProto as text proto, this is + # useful when debugging. + descriptor_file = response.file.add() + descriptor_file.name = file_proto.name + ".descriptor.proto" + descriptor_file.content = str(file_proto) sys.stdout.buffer.write(response.SerializeToString()) diff --git a/tools/api_proto_plugin/traverse.py b/tools/api_proto_plugin/traverse.py index 6ad97b8699aa..57c08f664b01 100644 --- a/tools/api_proto_plugin/traverse.py +++ b/tools/api_proto_plugin/traverse.py @@ -3,6 +3,20 @@ from tools.api_proto_plugin import type_context +def TraverseService(type_context, service_proto, visitor): + """Traverse an enum definition. + + Args: + type_context: type_context.TypeContext for service type. + service_proto: ServiceDescriptorProto for service. + visitor: visitor.Visitor defining the business logic of the plugin. + + Returns: + Plugin specific output. + """ + return visitor.VisitService(service_proto, type_context) + + def TraverseEnum(type_context, enum_proto, visitor): """Traverse an enum definition. @@ -61,6 +75,10 @@ def TraverseFile(file_proto, visitor): """ source_code_info = type_context.SourceCodeInfo(file_proto.name, file_proto.source_code_info) package_type_context = type_context.TypeContext(source_code_info, file_proto.package) + services = [ + TraverseService(package_type_context.ExtendService(index, service.name), service, visitor) + for index, service in enumerate(file_proto.service) + ] msgs = [ TraverseMessage(package_type_context.ExtendMessage(index, msg.name), msg, visitor) for index, msg in enumerate(file_proto.message_type) @@ -69,4 +87,4 @@ def TraverseFile(file_proto, visitor): TraverseEnum(package_type_context.ExtendEnum(index, enum.name), enum, visitor) for index, enum in enumerate(file_proto.enum_type) ] - return visitor.VisitFile(file_proto, package_type_context, msgs, enums) + return visitor.VisitFile(file_proto, package_type_context, services, msgs, enums) diff --git a/tools/api_proto_plugin/type_context.py b/tools/api_proto_plugin/type_context.py index d69c120a1a63..e88f02404193 100644 --- a/tools/api_proto_plugin/type_context.py +++ b/tools/api_proto_plugin/type_context.py @@ -77,6 +77,36 @@ def LeadingCommentPathLookup(self, path): annotations.ExtractAnnotations(location.leading_comments, self.file_level_annotations)) return Comment('', {}) + def LeadingDetachedCommentsPathLookup(self, path): + """Lookup leading detached comments by path in SourceCodeInfo. 
+ + Args: + path: a list of path indexes as per + https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. + + Returns: + List of detached comment strings. + """ + location = self.LocationPathLookup(path) + if location is not None and location.leading_detached_comments != self.file_level_comments: + return location.leading_detached_comments + return [] + + def TrailingCommentPathLookup(self, path): + """Lookup trailing comment by path in SourceCodeInfo. + + Args: + path: a list of path indexes as per + https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. + + Returns: + Raw detached comment string + """ + location = self.LocationPathLookup(path) + if location is not None: + return location.trailing_comments + return '' + class TypeContext(object): """Contextual information for a message/field. @@ -157,6 +187,15 @@ def ExtendEnum(self, index, name): """ return self._Extend([5, index], 'enum', name) + def ExtendService(self, index, name): + """Extend type context with a service. + + Args: + index: service index in file. + name: service name. + """ + return self._Extend([6, index], 'service', name) + def ExtendNestedEnum(self, index, name): """Extend type context with a nested enum. @@ -184,6 +223,15 @@ def ExtendOneof(self, index, name): """ return self._Extend([8, index], 'oneof', name) + def ExtendMethod(self, index, name): + """Extend type context with a service method declaration. + + Args: + index: method index in service. + name: method name. + """ + return self._Extend([2, index], 'method', name) + @property def location(self): """SourceCodeInfo.Location for type context.""" @@ -193,3 +241,13 @@ def location(self): def leading_comment(self): """Leading comment for type context.""" return self.source_code_info.LeadingCommentPathLookup(self.path) + + @property + def leading_detached_comments(self): + """Leading detached comments for type context.""" + return self.source_code_info.LeadingDetachedCommentsPathLookup(self.path) + + @property + def trailing_comment(self): + """Trailing comment for type context.""" + return self.source_code_info.TrailingCommentPathLookup(self.path) diff --git a/tools/api_proto_plugin/visitor.py b/tools/api_proto_plugin/visitor.py index 0065537f0a6e..1dfd361fdba5 100644 --- a/tools/api_proto_plugin/visitor.py +++ b/tools/api_proto_plugin/visitor.py @@ -4,6 +4,18 @@ class Visitor(object): """Abstract visitor interface for api_proto_plugin implementation.""" + def VisitService(self, service_proto, type_context): + """Visit a service definition. + + Args: + service_proto: ServiceDescriptorProto for service. + type_context: type_context.TypeContext for service type. + + Returns: + Plugin specific output. + """ + pass + def VisitEnum(self, enum_proto, type_context): """Visit an enum definition. @@ -30,12 +42,13 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): """ pass - def VisitFile(self, file_proto, type_context, msgs, enums): + def VisitFile(self, file_proto, type_context, services, msgs, enums): """Visit a proto file definition. Args: file_proto: FileDescriptorProto for file. type_context: type_context.TypeContext for file. + services: a list of results from visiting services. msgs: a list of results from visiting messages. enums: a list of results from visiting enums. 
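To make the extended visitor/traverse interface above concrete, the following is a minimal sketch of a plugin built on it. It is not part of this patch; the CountingVisitor class, Main entry point and ".count" output suffix are illustrative names, and it assumes plugin.Plugin() writes each file's VisitFile result out under the given suffix, as the existing protodoc-style plugins do. It visits the new services list alongside messages and enums and emits a one-line summary per .proto file.

from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor


class CountingVisitor(visitor.Visitor):
  """Toy visitor that summarizes each .proto file (illustrative only)."""

  def VisitService(self, service_proto, type_context):
    # Per-service results are collected by TraverseFile and handed to VisitFile below.
    return service_proto.name

  def VisitEnum(self, enum_proto, type_context):
    return enum_proto.name

  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
    return msg_proto.name

  def VisitFile(self, file_proto, type_context, services, msgs, enums):
    # The returned string is what plugin.Plugin() emits for this file, presumably
    # as <proto name>.count in the CodeGeneratorResponse.
    return '%s: %d services, %d messages, %d enums\n' % (file_proto.name, len(services),
                                                         len(msgs), len(enums))


def Main():
  plugin.Plugin('.count', CountingVisitor())


if __name__ == '__main__':
  Main()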
diff --git a/tools/proto_format.sh b/tools/proto_format.sh new file mode 100755 index 000000000000..71cfea2d0eb7 --- /dev/null +++ b/tools/proto_format.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Reformat API protos to canonical proto style using protoxform. + +set -e + +if [[ -n "$(git status --untracked-files=no --porcelain)" ]] +then + echo "git status is dirty, $0 requires a clean git tree" + exit 1 +fi + +# TODO(htuch): This script started life by cloning docs/build.sh. It depends on +# the @envoy_api//docs:protos target in a few places as a result. This is not +# the precise set of protos we want to format, but as a starting place it seems +# reasonable. In the future, we should change the logic here. +bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ + tools/protoxform/protoxform.bzl%proto_xform_aspect --output_groups=proto --action_env=CPROFILE_ENABLED=1 \ + --spawn_strategy=standalone --host_force_python=PY3 + +declare -r DOCS_DEPS=$(bazel query "labels(deps, @envoy_api//docs:protos)") + +# Copy protos from Bazel build-cache back into source tree. +for PROTO_TARGET in ${DOCS_DEPS} +do + for p in $(bazel query "labels(srcs, ${PROTO_TARGET})" ) + do + declare PROTO_TARGET_WITHOUT_PREFIX="${PROTO_TARGET#@envoy_api//}" + declare PROTO_TARGET_CANONICAL="${PROTO_TARGET_WITHOUT_PREFIX/://}" + declare PROTO_FILE_WITHOUT_PREFIX="${p#@envoy_api//}" + declare PROTO_FILE_CANONICAL="${PROTO_FILE_WITHOUT_PREFIX/://}" + declare DEST="api/${PROTO_FILE_CANONICAL}" + + [[ -f "${DEST}" ]] + cp bazel-bin/external/envoy_api/"${PROTO_TARGET_CANONICAL}/${PROTO_FILE_CANONICAL}.proto" "${DEST}" + done +done diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 341f93afa070..626bfc896b19 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -490,7 +490,7 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) - def VisitFile(self, file_proto, type_context, msgs, enums): + def VisitFile(self, file_proto, type_context, services, msgs, enums): # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD new file mode 100644 index 000000000000..780ad11d4fd2 --- /dev/null +++ b/tools/protoxform/BUILD @@ -0,0 +1,14 @@ +licenses(["notice"]) # Apache 2 + +py_binary( + name = "protoxform", + srcs = ["protoxform.py"], + python_version = "PY3", + visibility = ["//visibility:public"], + deps = [ + "//tools/api_proto_plugin", + "@com_envoyproxy_protoc_gen_validate//validate:validate_py", + "@com_google_googleapis//google/api:annotations_py_proto", + "@com_google_protobuf//:protobuf_python", + ], +) diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl new file mode 100644 index 000000000000..e20650ddfb72 --- /dev/null +++ b/tools/protoxform/protoxform.bzl @@ -0,0 +1,95 @@ +# TODO(htuch): this is a clone+modify from //tools/protodoc:protodoc.bzl. +# Factor out common parts for this kind of API protoc aspect. + +# Borrowed from +# https://github.com/bazelbuild/rules_go/blob/master/proto/toolchain.bzl. This +# does some magic munging to remove workspace prefixes from output paths to +# convert path as understood by Bazel into paths as understood by protoc. 
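# As an illustration of the munging described above (an editorial example, not
# generated output, using a file that appears in this patch): for a source
# proto in the external @envoy_api workspace,
#
#   proto.path                 = "external/envoy_api/envoy/api/v2/core/base.proto"
#   proto.root.path            = ""
#   proto.owner.workspace_root = "external/envoy_api"
#
# _proto_path() below strips the (empty) root and then the workspace prefix,
# yielding the protoc-style import path "envoy/api/v2/core/base.proto".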
+def _proto_path(proto): + """ + The proto path is not really a file path + It's the path to the proto that was seen when the descriptor file was generated. + """ + path = proto.path + root = proto.root.path + ws = proto.owner.workspace_root + if path.startswith(root): + path = path[len(root):] + if path.startswith("/"): + path = path[1:] + if path.startswith(ws): + path = path[len(ws):] + if path.startswith("/"): + path = path[1:] + return path + +# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# that can be invoked from the CLI to perform API transforms via //tools/protoxform for +# proto_library targets. Example use: +# +# bazel build //api --aspects tools/protoxform/protoxform.bzl%proto_xform_aspect \ +# --output_groups=proto +def _proto_xform_aspect_impl(target, ctx): + # Compute .proto files from the current proto_library node's dependencies. + transitive_outputs = depset(transitive = [dep.output_groups["proto"] for dep in ctx.rule.attr.deps]) + proto_sources = target[ProtoInfo].direct_sources + + # If this proto_library doesn't actually name any sources, e.g. //api:api, + # but just glues together other libs, we just need to follow the graph. + if not proto_sources: + return [OutputGroupInfo(proto = transitive_outputs)] + + # Figure out the set of import paths. Ideally we would use descriptor sets + # built by proto_library, which avoid having to do nasty path mangling, but + # these don't include source_code_info, which we need for comment + # extractions. See https://github.com/bazelbuild/bazel/issues/3971. + import_paths = [] + for f in target[ProtoInfo].transitive_sources.to_list(): + if f.root.path: + import_path = f.root.path + "/" + f.owner.workspace_root + else: + import_path = f.owner.workspace_root + if import_path: + import_paths += [import_path] + + # The outputs live in the ctx.label's package root. We add some additional + # path information to match with protoc's notion of path relative locations. + outputs = [ctx.actions.declare_file(ctx.label.name + "/" + _proto_path(f) + + ".proto") for f in proto_sources] + + # Create the protoc command-line args. 
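    # Roughly, and purely as an illustration (Bazel output roots elided, file
    # name taken from this patch), the resulting invocation looks like:
    #
    #   protoc -I./external/envoy_api -I<transitive import roots...> \
    #     --plugin=protoc-gen-protoxform=<path to the //tools/protoxform binary> \
    #     --protoxform_out=<package-relative output dir under bazel-bin> \
    #     envoy/api/v2/core/base.proto ...
    #
    # i.e. the plugin sees the same import paths protoc does and one reformatted
    # .proto is written per direct source under the declared output directory.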
+ ctx_path = ctx.label.package + "/" + ctx.label.name + output_path = outputs[0].root.path + "/" + outputs[0].owner.workspace_root + "/" + ctx_path + args = ["-I./" + ctx.label.workspace_root] + args += ["-I" + import_path for import_path in import_paths] + args += ["--plugin=protoc-gen-protoxform=" + ctx.executable._protoxform.path, "--protoxform_out=" + output_path] + args += [_proto_path(src) for src in target[ProtoInfo].direct_sources] + ctx.actions.run( + executable = ctx.executable._protoc, + arguments = args, + inputs = target[ProtoInfo].transitive_sources, + tools = [ctx.executable._protoxform], + outputs = outputs, + mnemonic = "protoxform", + use_default_shell_env = True, + ) + + transitive_outputs = depset(outputs, transitive = [transitive_outputs]) + return [OutputGroupInfo(proto = transitive_outputs)] + +proto_xform_aspect = aspect( + attr_aspects = ["deps"], + attrs = { + "_protoc": attr.label( + default = Label("@com_google_protobuf//:protoc"), + executable = True, + cfg = "exec", + ), + "_protoxform": attr.label( + default = Label("//tools/protoxform"), + executable = True, + cfg = "exec", + ), + }, + implementation = _proto_xform_aspect_impl, +) diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py new file mode 100755 index 000000000000..b17a3a0ac3a6 --- /dev/null +++ b/tools/protoxform/protoxform.py @@ -0,0 +1,405 @@ +# protoc plugin to map from FileDescriptorProtos to a canonicaly formatted +# proto. +# +# protoxform is currently only a formatting tool, but will act as the basis for +# vN -> v(N+1) API migration tooling, allowing for things like deprecated field +# removal, package renaming, field movement, providing both .proto and .cc code +# generation to support automation of Envoy API version translation. +# +# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto +# for the underlying protos mentioned in this file. See + +from collections import deque +import functools +import os +import re +import subprocess + +from tools.api_proto_plugin import plugin +from tools.api_proto_plugin import visitor + +from google.api import annotations_pb2 +from google.protobuf import text_format +from validate import validate_pb2 + +CLANG_FORMAT_STYLE = ('{ColumnLimit: 100, SpacesInContainerLiterals: false, ' + 'AllowShortFunctionsOnASingleLine: false}') + + +class ProtoXformError(Exception): + """Base error class for the protoxform module.""" + + +def ClangFormat(contents): + """Run proto-style oriented clang-format over given string. + + Args: + contents: a string with proto contents. + + Returns: + clang-formatted string + """ + return subprocess.run( + ['clang-format', + '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'], + input=contents.encode('utf-8'), + stdout=subprocess.PIPE).stdout + + +def FormatBlock(block): + """Append \n to a .proto section (e.g. + + comment, message definition, etc.) if non-empty. + + Args: + block: a string representing the section. + + Returns: + A string with appropriate whitespace. + """ + if block.strip(): + return block + '\n' + return '' + + +def FormatComments(comments): + """Format a list of comment blocks from SourceCodeInfo. + + Prefixes // to each line, separates blocks by spaces. + + Args: + comments: a list of blocks, each block is a list of strings representing + lines in each block. + + Returns: + A string reprenting the formatted comment blocks. 
+  """
+
+  # TODO(htuch): not sure why this is needed, but clang-format does some weird
+  # stuff with // comment indents when we have these trailing \
+  def FixupTrailingBackslash(s):
+    return s[:-1].rstrip() if s.endswith('\\') else s
+
+  comments = '\n\n'.join(
+      '\n'.join(['//%s' % FixupTrailingBackslash(line)
+                 for line in comment.split('\n')[:-1]])
+      for comment in comments)
+  return FormatBlock(comments)
+
+
+def FormatTypeContextComments(type_context):
+  """Format the leading/trailing comments in a given TypeContext.
+
+  Args:
+    type_context: contextual information for message/enum/field.
+
+  Returns:
+    Tuple of formatted leading and trailing comment blocks.
+  """
+  leading = FormatComments(
+      list(type_context.leading_detached_comments) + [type_context.leading_comment.raw])
+  trailing = FormatBlock(FormatComments([type_context.trailing_comment]))
+  return leading, trailing
+
+
+def FormatHeaderFromFile(source_code_info, file_proto):
+  """Format proto header.
+
+  Args:
+    source_code_info: SourceCodeInfo object.
+    file_proto: FileDescriptorProto for file.
+
+  Returns:
+    Formatted proto header as a string.
+  """
+
+  def CamelCase(s):
+    return ''.join(t.capitalize() for t in re.split('[\._]', s))
+
+  package_line = 'package %s;\n' % file_proto.package
+  file_block = '\n'.join(['syntax = "proto3";\n', package_line])
+
+  options = [
+      'option java_outer_classname = "%s";' % CamelCase(os.path.basename(file_proto.name)),
+      'option java_multiple_files = true;',
+      'option java_package = "io.envoyproxy.%s";' % file_proto.package,
+  ]
+  if file_proto.service:
+    options += ['option java_generic_services = true;']
+  options_block = FormatBlock('\n'.join(options))
+
+  envoy_imports = []
+  google_imports = []
+  infra_imports = []
+  misc_imports = []
+
+  for d in file_proto.dependency:
+    if d.startswith('envoy/'):
+      envoy_imports.append(d)
+    elif d.startswith('google/'):
+      google_imports.append(d)
+    elif d.startswith('validate/'):
+      infra_imports.append(d)
+    else:
+      misc_imports.append(d)
+
+  def FormatImportBlock(xs):
+    if not xs:
+      return ''
+    return FormatBlock('\n'.join(sorted('import "%s";' % x for x in xs)))
+
+  import_block = '\n'.join(
+      map(FormatImportBlock, [envoy_imports, google_imports, misc_imports, infra_imports]))
+  comment_block = FormatComments(source_code_info.file_level_comments)
+
+  return ''.join(map(FormatBlock, [file_block, options_block, import_block, comment_block]))
+
+
+def NormalizeFieldTypeName(type_context, field_fqn):
+  """Normalize a fully qualified field type name, e.g.
+
+  .envoy.foo.bar is normalized to foo.bar.
+
+  Considers type context to minimize type prefix.
+
+  Args:
+    field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.
+    type_context: contextual information for message/enum/field.
+
+  Returns:
+    Normalized type name as a string.
+  """
+  if field_fqn.startswith('.'):
+    # Let's say we have type context namespace a.b.c.d.e and the type we're
+    # trying to normalize is a.b.d.e. We take (from the end) one package fragment
+    # at a time, and apply the innermost evaluation that protoc performs to see
+    # if we evaluate to the fully qualified type. If so, we're done. It's not
+    # sufficient to compute the common prefix and drop that, since in the above
+    # example the normalized type name would be d.e, which protoc resolves
+    # innermost-first as a.b.c.d.e (bad) instead of the intended a.b.d.e.
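To make that resolution order concrete, here is a small self-contained sketch (the package and type names are hypothetical, and this models only the outward scope walk, not full protoc name resolution):

  # Candidate resolutions protoc considers for a relative name, innermost scope first.
  def innermost_candidates(scope, name):
    parts = scope.split('.')
    return ['.'.join(parts[:i] + [name]) for i in range(len(parts), -1, -1)]

  # Inside package a.b.c, the relative name 'd.e.Foo' is tried as:
  print(innermost_candidates('a.b.c', 'd.e.Foo'))
  # ['a.b.c.d.e.Foo', 'a.b.d.e.Foo', 'a.d.e.Foo', 'd.e.Foo']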
+ field_fqn_splits = field_fqn[1:].split('.') + type_context_splits = type_context.name.split('.')[:-1] + remaining_field_fqn_splits = deque(field_fqn_splits[:-1]) + normalized_splits = deque([field_fqn_splits[-1]]) + + def EquivalentInTypeContext(splits): + type_context_splits_tmp = deque(type_context_splits) + while type_context_splits_tmp: + # If we're in a.b.c and the FQN is a.d.Foo, we want to return true once + # we have type_context_splits_tmp as [a] and splits as [d, Foo]. + if list(type_context_splits_tmp) + list(splits) == field_fqn_splits: + return True + # If we're in a.b.c.d.e.f and the FQN is a.b.d.e.Foo, we want to return True + # once we have type_context_splits_tmp as [a] and splits as [b, d, e, Foo], but + # not when type_context_splits_tmp is [a, b, c] and FQN is [d, e, Foo]. + if len(splits) > 1 and '.'.join(type_context_splits_tmp).endswith('.'.join( + list(splits)[:-1])): + return False + type_context_splits_tmp.pop() + return False + + while remaining_field_fqn_splits and not EquivalentInTypeContext(normalized_splits): + normalized_splits.appendleft(remaining_field_fqn_splits.pop()) + + return '.'.join(normalized_splits) + return field_fqn + + +def TypeNameFromFQN(fqn): + return fqn[1:] + + +def FormatFieldType(type_context, field): + """Format a FieldDescriptorProto type description. + + Args: + type_context: contextual information for message/enum/field. + field: FieldDescriptor proto. + + Returns: + Formatted proto field type as string. + """ + label = 'repeated ' if field.label == field.LABEL_REPEATED else '' + type_name = label + NormalizeFieldTypeName(type_context, field.type_name) + + if field.type == field.TYPE_MESSAGE: + if type_context.map_typenames and TypeNameFromFQN( + field.type_name) in type_context.map_typenames: + return 'map<%s, %s>' % tuple( + map(functools.partial(FormatFieldType, type_context), + type_context.map_typenames[TypeNameFromFQN(field.type_name)])) + return type_name + elif field.type_name: + return type_name + + pretty_type_names = { + field.TYPE_DOUBLE: 'double', + field.TYPE_FLOAT: 'float', + field.TYPE_INT32: 'int32', + field.TYPE_SFIXED32: 'int32', + field.TYPE_SINT32: 'int32', + field.TYPE_FIXED32: 'uint32', + field.TYPE_UINT32: 'uint32', + field.TYPE_INT64: 'int64', + field.TYPE_SFIXED64: 'int64', + field.TYPE_SINT64: 'int64', + field.TYPE_FIXED64: 'uint64', + field.TYPE_UINT64: 'uint64', + field.TYPE_BOOL: 'bool', + field.TYPE_STRING: 'string', + field.TYPE_BYTES: 'bytes', + } + if field.type in pretty_type_names: + return label + pretty_type_names[field.type] + raise ProtoXformError('Unknown field type ' + str(field.type)) + + +def FormatServiceMethod(type_context, method): + """Format a service MethodDescriptorProto. + + Args: + type_context: contextual information for method. + method: MethodDescriptorProto proto. + + Returns: + Formatted service method as string. 
+  """
+
+  def FormatStreaming(s):
+    return 'stream ' if s else ''
+
+  def FormatHttpOptions(options):
+    if options.HasExtension(annotations_pb2.http):
+      http_options = options.Extensions[annotations_pb2.http]
+      return 'option (google.api.http) = { %s };' % str(http_options)
+    return ''
+
+  leading_comment, trailing_comment = FormatTypeContextComments(type_context)
+  return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % (
+      leading_comment, method.name, trailing_comment, FormatStreaming(
+          method.client_streaming), NormalizeFieldTypeName(
+              type_context, method.input_type), FormatStreaming(method.server_streaming),
+      NormalizeFieldTypeName(type_context, method.output_type), FormatHttpOptions(method.options))
+
+
+def FormatValidateFieldRules(rules):
+  """Format validate_pb2 rules.
+
+  Args:
+    rules: validate_pb2 rules proto.
+
+  Returns:
+    Formatted validation rules as a string, suitable for proto field annotation.
+  """
+  return ' '.join('.%s = { %s }' %
+                  (field.name, text_format.MessageToString(value, as_one_line=True))
+                  for field, value in rules.ListFields())
+
+
+def FormatField(type_context, field):
+  """Format FieldDescriptorProto as a proto field.
+
+  Args:
+    type_context: contextual information for message/enum/field.
+    field: FieldDescriptor proto.
+
+  Returns:
+    Formatted proto field as a string.
+  """
+  leading_comment, trailing_comment = FormatTypeContextComments(type_context)
+  annotations = []
+  if field.options.HasExtension(validate_pb2.rules):
+    rules = field.options.Extensions[validate_pb2.rules]
+    annotations.append('(validate.rules) %s' % FormatValidateFieldRules(rules))
+  if field.options.deprecated:
+    annotations.append('deprecated = true')
+  formatted_annotations = '[ %s]' % ','.join(annotations) if annotations else ''
+  return '%s%s %s = %d%s;\n%s' % (leading_comment, FormatFieldType(
+      type_context, field), field.name, field.number, formatted_annotations, trailing_comment)
+
+
+def FormatEnumValue(type_context, value):
+  """Format an EnumValueDescriptorProto as a proto enum value.
+
+  Args:
+    type_context: contextual information for message/enum/field.
+    value: EnumValueDescriptorProto.
+
+  Returns:
+    Formatted proto enum value as a string.
+  """
+  leading_comment, trailing_comment = FormatTypeContextComments(type_context)
+  return '%s%s = %d;\n%s' % (leading_comment, value.name, value.number, trailing_comment)
+
+
+class ProtoFormatVisitor(visitor.Visitor):
+  """Visitor to generate a proto representation from a FileDescriptor proto.
+
+  See visitor.Visitor for visitor method doc comments.
+  """
+
+  def VisitService(self, service_proto, type_context):
+    leading_comment, trailing_comment = FormatTypeContextComments(type_context)
+    methods = '\n'.join(
+        FormatServiceMethod(type_context.ExtendMethod(index, m.name), m)
+        for index, m in enumerate(service_proto.method))
+    return '%sservice %s {\n%s%s\n}\n' % (leading_comment, service_proto.name, trailing_comment,
+                                          methods)
+
+  def VisitEnum(self, enum_proto, type_context):
+    leading_comment, trailing_comment = FormatTypeContextComments(type_context)
+    values = '\n'.join(
+        FormatEnumValue(type_context.ExtendField(index, value.name), value)
+        for index, value in enumerate(enum_proto.value))
+    return '%senum %s {\n%s%s\n}\n' % (leading_comment, enum_proto.name, trailing_comment, values)
+
+  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
+    leading_comment, trailing_comment = FormatTypeContextComments(type_context)
+    formatted_enums = FormatBlock('\n'.join(nested_enums))
+    formatted_msgs = FormatBlock('\n'.join(nested_msgs))
+    # Reserved fields.
+    reserved_fields = FormatBlock('reserved %s;\n' % ','.join(
+        map(str, sum([list(range(rr.start, rr.end)) for rr in msg_proto.reserved_range],
+                     [])))) if msg_proto.reserved_range else ''
+    # Recover the oneof structure. This needs some extra work, since
+    # DescriptorProto just gives us fields and a oneof_index that allow
+    # recovery of the original oneof placement.
+    fields = ''
+    oneof_index = None
+    for index, field in enumerate(msg_proto.field):
+      if oneof_index is not None:
+        if not field.HasField('oneof_index') or field.oneof_index != oneof_index:
+          fields += '}\n\n'
+          oneof_index = None
+      if oneof_index is None and field.HasField('oneof_index'):
+        oneof_index = field.oneof_index
+        oneof_proto = msg_proto.oneof_decl[oneof_index]
+        if oneof_proto.options.HasExtension(validate_pb2.required):
+          oneof_options = 'option (validate.required) = true;\n\n'
+        else:
+          oneof_options = ''
+        oneof_leading_comment, oneof_trailing_comment = FormatTypeContextComments(
+            type_context.ExtendOneof(oneof_index, field.name))
+        fields += '%soneof %s {\n%s%s' % (oneof_leading_comment, oneof_proto.name,
+                                          oneof_trailing_comment, oneof_options)
+      fields += FormatBlock(FormatField(type_context.ExtendField(index, field.name), field))
+    if oneof_index is not None:
+      fields += '}\n\n'
+    return '%smessage %s {\n%s%s%s%s%s\n}\n' % (leading_comment, msg_proto.name, trailing_comment,
+                                                formatted_enums, formatted_msgs, reserved_fields,
+                                                fields)

+  def VisitFile(self, file_proto, type_context, services, msgs, enums):
+    header = FormatHeaderFromFile(type_context.source_code_info, file_proto)
+    formatted_services = FormatBlock('\n'.join(services))
+    formatted_enums = FormatBlock('\n'.join(enums))
+    formatted_msgs = FormatBlock('\n'.join(msgs))
+    return ClangFormat(header + formatted_services + formatted_enums + formatted_msgs)
+
+
+def Main():
+  plugin.Plugin('.proto', ProtoFormatVisitor())
+
+
+if __name__ == '__main__':
+  Main()

From baa05ae511997c332d7373852fc127df4b7bc2e0 Mon Sep 17 00:00:00 2001
From: Harvey Tuch
Date: Fri, 20 Sep 2019 16:01:03 -0400
Subject: [PATCH 2/3] Fix some missing enum value deprecations.
Signed-off-by: Harvey Tuch --- api/envoy/api/v2/cds.proto | 2 +- api/envoy/api/v2/core/config_source.proto | 4 ++-- api/envoy/api/v2/eds.proto | 4 +++- api/envoy/config/trace/v2/trace.proto | 2 +- tools/protoxform/protoxform.py | 7 ++++++- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index 4729ef832afa..41531989d7e7 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -107,7 +107,7 @@ message Cluster { // // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. // - ORIGINAL_DST_LB = 4; + ORIGINAL_DST_LB = 4 [deprecated = true]; // Refer to the :ref:`Maglev load balancing policy` // for an explanation. diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index f0921d3b47a7..5cbe95ee7299 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -22,7 +22,7 @@ message ApiConfigSource { enum ApiType { // Ideally this would be 'reserved 0' but one can't reserve the default // value. Instead we throw an exception if this is ever used. - UNSUPPORTED_REST_LEGACY = 0; + UNSUPPORTED_REST_LEGACY = 0 [deprecated = true]; // REST-JSON v2 API. The `canonical JSON encoding // `_ for @@ -61,7 +61,7 @@ message ApiConfigSource { google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {seconds: 0}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index 15518902977a..b616eaf15847 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -47,6 +47,7 @@ service EndpointDiscoveryService { // load_balancing_weight of its locality. First, a locality will be selected, // then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { + // Load balancing policy settings. message Policy { message DropOverload { @@ -100,7 +101,8 @@ message ClusterLoadAssignment { // If no new assignments are received before this time expires the endpoints // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; + google.protobuf.Duration endpoint_stale_after = 4 + [(validate.rules).duration = {gt {seconds: 0}}]; // The flag to disable overprovisioning. If it is set to true, // :ref:`overprovisioning factor diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index f4b1d8391008..f651dc3d3ed9 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -76,7 +76,7 @@ message ZipkinConfig { // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - HTTP_JSON_V1 = 0; + HTTP_JSON_V1 = 0 [deprecated = true]; // Zipkin API v2, JSON over HTTP. 
HTTP_JSON = 1; diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index b17a3a0ac3a6..1b210364bab2 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -329,7 +329,12 @@ def FormatEnumValue(type_context, value): Formatted proto enum value as a string. """ leading_comment, trailing_comment = FormatTypeContextComments(type_context) - return '%s%s = %d;\n%s' % (leading_comment, value.name, value.number, trailing_comment) + annotations = [] + if value.options.deprecated: + annotations.append('deprecated = true') + formatted_annotations = '[ %s]' % ','.join(annotations) if annotations else '' + return '%s%s = %d%s;\n%s' % (leading_comment, value.name, value.number, formatted_annotations, + trailing_comment) class ProtoFormatVisitor(visitor.Visitor): From 662dde6eef7f1e46bec4421edec8533180af42c7 Mon Sep 17 00:00:00 2001 From: Harvey Tuch Date: Mon, 23 Sep 2019 12:51:15 -0400 Subject: [PATCH 3/3] Fix C#/Ruby namespace regression. Signed-off-by: Harvey Tuch --- api/envoy/api/v2/cluster/circuit_breaker.proto | 2 ++ api/envoy/api/v2/cluster/filter.proto | 2 ++ api/envoy/api/v2/cluster/outlier_detection.proto | 2 ++ api/envoy/api/v2/core/config_source.proto | 3 ++- api/envoy/api/v2/eds.proto | 4 +--- api/envoy/api/v2/lds.proto | 2 +- api/envoy/api/v2/listener/listener.proto | 2 ++ api/envoy/api/v2/listener/quic_config.proto | 2 ++ api/envoy/api/v2/listener/udp_listener_config.proto | 2 ++ tools/protoxform/protoxform.py | 10 ++++++++++ 10 files changed, 26 insertions(+), 5 deletions(-) diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index e5eb3bb07289..d2e0a328e49f 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -5,6 +5,8 @@ package envoy.api.v2.cluster; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/api/v2/cluster/filter.proto b/api/envoy/api/v2/cluster/filter.proto index 6ecb536f1015..b89b2a6b778b 100644 --- a/api/envoy/api/v2/cluster/filter.proto +++ b/api/envoy/api/v2/cluster/filter.proto @@ -5,6 +5,8 @@ package envoy.api.v2.cluster; option java_outer_classname = "FilterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; import "google/protobuf/any.proto"; diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto index 5b247d898b02..4702bd0a6f1c 100644 --- a/api/envoy/api/v2/cluster/outlier_detection.proto +++ b/api/envoy/api/v2/cluster/outlier_detection.proto @@ -5,6 +5,8 @@ package envoy.api.v2.cluster; option java_outer_classname = "OutlierDetectionProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index c9517dcddefb..240d37b81ee2 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ 
b/api/envoy/api/v2/core/config_source.proto @@ -61,7 +61,7 @@ message ApiConfigSource { google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {seconds: 0}}]; + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. @@ -125,6 +125,7 @@ message ConfigSource { // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. AggregatedConfigSource ads = 3; + // [#not-implemented-hide:] // When set, the client will access the resources from the same server it got the // ConfigSource from, although not necessarily from the same stream. This is similar to the diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index b616eaf15847..15518902977a 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -47,7 +47,6 @@ service EndpointDiscoveryService { // load_balancing_weight of its locality. First, a locality will be selected, // then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { - // Load balancing policy settings. message Policy { message DropOverload { @@ -101,8 +100,7 @@ message ClusterLoadAssignment { // If no new assignments are received before this time expires the endpoints // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 - [(validate.rules).duration = {gt {seconds: 0}}]; + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; // The flag to disable overprovisioning. If it is set to true, // :ref:`overprovisioning factor diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto index cda343b29bf0..0ea940f6c476 100644 --- a/api/envoy/api/v2/lds.proto +++ b/api/envoy/api/v2/lds.proto @@ -213,5 +213,5 @@ message Listener { // and the top-level Listener should essentially be a oneof that selects between the // socket listener and the various types of API listener. That way, a given Listener message // can structurally only contain the fields of the relevant type.] 
- envoy.config.listener.v2.ApiListener api_listener = 19; + config.listener.v2.ApiListener api_listener = 19; } diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index a5afa2ff044f..949075840ddf 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -5,6 +5,8 @@ package envoy.api.v2.listener; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; import "envoy/api/v2/auth/cert.proto"; import "envoy/api/v2/core/address.proto"; diff --git a/api/envoy/api/v2/listener/quic_config.proto b/api/envoy/api/v2/listener/quic_config.proto index e4e3fee3c0d4..1f67a4f7000a 100644 --- a/api/envoy/api/v2/listener/quic_config.proto +++ b/api/envoy/api/v2/listener/quic_config.proto @@ -5,6 +5,8 @@ package envoy.api.v2.listener; option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto index a2fa43899166..4b489b99884c 100644 --- a/api/envoy/api/v2/listener/udp_listener_config.proto +++ b/api/envoy/api/v2/listener/udp_listener_config.proto @@ -5,6 +5,8 @@ package envoy.api.v2.listener; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index 1b210364bab2..25e59dd185ab 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -124,6 +124,16 @@ def CamelCase(s): 'option java_multiple_files = true;', 'option java_package = "io.envoyproxy.%s";' % file_proto.package, ] + # This is a workaround for C#/Ruby namespace conflicts between packages and + # objects, see https://github.com/envoyproxy/envoy/pull/3854. + # TODO(htuch): remove once v3 fixes this naming issue in + # https://github.com/envoyproxy/envoy/issues/8120. + if file_proto.package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']: + qualified_package = '.'.join(s.capitalize() for s in file_proto.package.split('.')) + 'NS' + options += [ + 'option csharp_namespace = "%s";' % qualified_package, + 'option ruby_package = "%s";' % qualified_package, + ] if file_proto.service: options += ['option java_generic_services = true;'] options_block = FormatBlock('\n'.join(options))
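As a quick check of the namespace derivation above, a standalone snippet mirroring the workaround (run over the two affected packages) reproduces the csharp_namespace and ruby_package values that appear in the proto diffs earlier in this patch:

  # Mirrors the qualified_package computation added to FormatHeaderFromFile above.
  for package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']:
    qualified_package = '.'.join(s.capitalize() for s in package.split('.')) + 'NS'
    print('option csharp_namespace = "%s";' % qualified_package)
    print('option ruby_package = "%s";' % qualified_package)
  # -> "Envoy.Api.V2.ListenerNS" and "Envoy.Api.V2.ClusterNS"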