diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml index aa834255f303..cf17ad071a49 100644 --- a/.azure-pipelines/linux.yml +++ b/.azure-pipelines/linux.yml @@ -1,4 +1,25 @@ jobs: +- job: format + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. + pool: + vmImage: 'Ubuntu 16.04' + steps: + - script: | + ci/run_envoy_docker.sh ' + set -e + ci/do_ci.sh check_format + ci/do_ci.sh check_repositories + ci/do_ci.sh check_spelling + ci/do_ci.sh check_spelling_pedantic + ' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run check format scripts" + - job: bazel strategy: matrix: @@ -10,8 +31,10 @@ jobs: CI_TARGET: 'bazel.gcc' compile_time_options: CI_TARGET: 'bazel.compile_time_options' - fuzz: - CI_TARGET: 'bazel.fuzz' + # This will run on every commit/PR and will make sure the corpus generated by the fuzzers as well as fixed crashes + # (on Fuzzit) is not crashing envoy. This will help find bugs BEFORE merging and not after. + fuzzit: + CI_TARGET: 'bazel.fuzzit_regression' dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. timeoutInMinutes: 360 pool: @@ -60,3 +83,39 @@ jobs: pathtoPublish: "$(Build.StagingDirectory)/envoy" artifactName: $(CI_TARGET) condition: always() + +- job: fuzzit_fuzzing + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. + timeoutInMinutes: 360 + # this runs on every push to master / merge to master. this will build the fuzzers and will update them on Fuzzit where + # they will run asynchronously. Essentially this will make sure that the latest master version is always being fuzzed + # continuously. 
+ condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master'), ne(variables['Build.Reason'], 'PullRequest')) + pool: + vmImage: 'Ubuntu 16.04' + steps: + - bash: | + echo "disk space at beginning of build:" + df -h + displayName: "Check disk space at beginning" + + - bash: | + sudo mkdir -p /etc/docker + echo '{ + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64" + }' | sudo tee /etc/docker/daemon.json + sudo service docker restart + displayName: "Enable IPv6" + + - script: ci/run_envoy_docker.sh 'ci/do_ci.sh bazel.fuzzit_fuzzing' + workingDirectory: $(Build.SourcesDirectory) + env: + FUZZIT_API_KEY: $(FuzzitApiKey) + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + ENVOY_RBE: "true" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote --jobs=100 --curses=no" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Fuzzit Regression" diff --git a/.circleci/config.yml b/.circleci/config.yml index 14cf64272abf..d16c75d6f493 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -67,6 +67,7 @@ jobs: - store_artifacts: path: /build/envoy/generated destination: / + filter_example_mirror: executor: ubuntu-build steps: @@ -114,17 +115,6 @@ jobs: ci/do_circle_ci.sh bazel.clang_tidy no_output_timeout: 60m - format: - executor: ubuntu-build - resource_class: small - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: pip install -r tools/requirements.txt - - run: ci/do_circle_ci.sh check_format - - run: ci/do_circle_ci.sh check_repositories - - run: ci/do_circle_ci.sh check_spelling - - run: ci/do_circle_ci.sh check_spelling_pedantic build_image: docker: - image: google/cloud-sdk @@ -165,7 +155,6 @@ workflows: - coverage - coverage_publish: requires: [coverage] - - format - clang_tidy - build_image - docs: diff --git 
a/README.md b/README.md index abe4e9822462..7455570302d2 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ involved and how Envoy plays a role, read the CNCF [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266) [![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/envoyproxy.envoy.mac?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=2&branchName=master) [![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) +[![fuzzit](https://app.fuzzit.dev/badge?org_id=envoyproxy)](https://app.fuzzit.dev/orgs/envoyproxy/dashboard) [![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) ## Documentation diff --git a/api/STYLE.md b/api/STYLE.md index 0289c5f85af2..fd1bb2c8e3e0 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -47,9 +47,8 @@ In addition, the following conventions should be followed: * Protos for configs and services that are not implemented immediately in Envoy, or are under active design and development should be versioned - "v2alpha". If several iterations of the alpha API are expected, then versions - "v2alpha1", "v2alpha2", and so on are preferred. Alpha-versioned protos are - considered experimental and are not required to preserve compatibility. + "vNalpha". See the [stable API versioning + policy](https://github.com/envoyproxy/envoy/issues/6271). * Every proto directory should have a `README.md` describing its content. See for example [envoy.service](envoy/service/README.md). 
diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl index fb6f40920ed8..25bca4673541 100644 --- a/api/bazel/api_build_system.bzl +++ b/api/bazel/api_build_system.bzl @@ -9,7 +9,7 @@ _CC_SUFFIX = "_cc" _CC_GRPC_SUFFIX = "_cc_grpc" _CC_EXPORT_SUFFIX = "_export_cc" _GO_PROTO_SUFFIX = "_go_proto" -_GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/data-plane-api/api/" +_GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/go-control-plane/" _COMMON_PROTO_DEPS = [ "@com_google_protobuf//:any_proto", @@ -105,6 +105,7 @@ def api_proto_library( visibility = ["//visibility:private"], srcs = [], deps = [], + tags = [], external_proto_deps = [], external_cc_proto_deps = [], external_py_proto_deps = [], @@ -116,6 +117,7 @@ def api_proto_library( name = name, srcs = srcs, deps = deps + external_proto_deps + _COMMON_PROTO_DEPS, + tags = tags, visibility = visibility, ) cc_proto_library_name = _Suffix(name, _CC_SUFFIX) diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index 1362c5671acb..27d0219ae8af 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -54,7 +54,7 @@ api_proto_library( go_proto_library( name = "client_model_go_proto", - importpath = "client_model", + importpath = "github.com/prometheus/client_model/go", proto = ":client_model", visibility = ["//visibility:public"], ) diff --git a/api/envoy/admin/v2alpha/certs.proto b/api/envoy/admin/v2alpha/certs.proto index c6d5e4154aed..04f78ceed142 100644 --- a/api/envoy/admin/v2alpha/certs.proto +++ b/api/envoy/admin/v2alpha/certs.proto @@ -19,7 +19,6 @@ message Certificates { } message Certificate { - // Details of CA certificate. repeated CertificateDetails ca_cert = 1; @@ -48,10 +47,10 @@ message CertificateDetails { } message SubjectAlternateName { - // Subject Alternate Name. 
oneof name { string dns = 1; + string uri = 2; } } diff --git a/api/envoy/admin/v2alpha/clusters.proto b/api/envoy/admin/v2alpha/clusters.proto index cc2c95110c6a..8119821e9dc9 100644 --- a/api/envoy/admin/v2alpha/clusters.proto +++ b/api/envoy/admin/v2alpha/clusters.proto @@ -45,7 +45,7 @@ message ClusterStatus { // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. - envoy.type.Percent success_rate_ejection_threshold = 3; + type.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. repeated HostStatus host_statuses = 4; @@ -65,13 +65,13 @@ message ClusterStatus { // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. - envoy.type.Percent local_origin_success_rate_ejection_threshold = 5; + type.Percent local_origin_success_rate_ejection_threshold = 5; } // Current state of a particular host. message HostStatus { // Address of this host. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; // List of stats specific to this host. repeated SimpleMetric stats = 2; @@ -92,7 +92,7 @@ message HostStatus { // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. - envoy.type.Percent success_rate = 4; + type.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. uint32 weight = 5; @@ -115,7 +115,7 @@ message HostStatus { // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. 
- envoy.type.Percent local_origin_success_rate = 8; + type.Percent local_origin_success_rate = 8; } // Health status for a host. @@ -139,5 +139,5 @@ message HostHealthStatus { // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // TODO(mrice32): pipe through remaining EDS health status possibilities. - envoy.api.v2.core.HealthStatus eds_health_status = 3; + api.v2.core.HealthStatus eds_health_status = 3; } diff --git a/api/envoy/admin/v2alpha/config_dump.proto b/api/envoy/admin/v2alpha/config_dump.proto index 10c57f06f1b7..15812a9ffd4d 100644 --- a/api/envoy/admin/v2alpha/config_dump.proto +++ b/api/envoy/admin/v2alpha/config_dump.proto @@ -39,7 +39,7 @@ message ConfigDump { // the static portions of an Envoy configuration by reusing the output as the bootstrap // configuration for another Envoy. message BootstrapConfigDump { - envoy.config.bootstrap.v2.Bootstrap bootstrap = 1; + config.bootstrap.v2.Bootstrap bootstrap = 1; // The timestamp when the BootstrapConfig was last updated. google.protobuf.Timestamp last_updated = 2; @@ -49,15 +49,10 @@ message BootstrapConfigDump { // configuration information can be used to recreate an Envoy configuration by populating all // listeners as static listeners or by returning them in a LDS response. message ListenersConfigDump { - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - // Describes a statically loaded listener. message StaticListener { // The listener config. - envoy.api.v2.Listener listener = 1; + api.v2.Listener listener = 1; // The timestamp when the Listener was last updated. google.protobuf.Timestamp last_updated = 2; @@ -72,12 +67,17 @@ message ListenersConfigDump { string version_info = 1; // The listener config. 
- envoy.api.v2.Listener listener = 2; + api.v2.Listener listener = 2; // The timestamp when the Listener was last updated. google.protobuf.Timestamp last_updated = 3; } + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + string version_info = 1; + // The statically loaded listener configs. repeated StaticListener static_listeners = 2; @@ -102,15 +102,10 @@ message ListenersConfigDump { // configuration information can be used to recreate an Envoy configuration by populating all // clusters as static clusters or by returning them in a CDS response. message ClustersConfigDump { - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - // Describes a statically loaded cluster. message StaticCluster { // The cluster config. - envoy.api.v2.Cluster cluster = 1; + api.v2.Cluster cluster = 1; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 2; @@ -125,12 +120,17 @@ message ClustersConfigDump { string version_info = 1; // The cluster config. - envoy.api.v2.Cluster cluster = 2; + api.v2.Cluster cluster = 2; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 3; } + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + string version_info = 1; + // The statically loaded cluster configs. repeated StaticCluster static_clusters = 2; @@ -153,7 +153,7 @@ message ClustersConfigDump { message RoutesConfigDump { message StaticRouteConfig { // The route config. - envoy.api.v2.RouteConfiguration route_config = 1; + api.v2.RouteConfiguration route_config = 1; // The timestamp when the Route was last updated. 
google.protobuf.Timestamp last_updated = 2; @@ -166,7 +166,7 @@ message RoutesConfigDump { string version_info = 1; // The route config. - envoy.api.v2.RouteConfiguration route_config = 2; + api.v2.RouteConfiguration route_config = 2; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 3; @@ -189,7 +189,7 @@ message ScopedRoutesConfigDump { string name = 1; // The scoped route configurations. - repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configs = 2; + repeated api.v2.ScopedRouteConfiguration scoped_route_configs = 2; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 3; @@ -205,7 +205,7 @@ message ScopedRoutesConfigDump { string version_info = 2; // The scoped route configurations. - repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configs = 3; + repeated api.v2.ScopedRouteConfiguration scoped_route_configs = 3; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 4; @@ -234,7 +234,7 @@ message SecretsConfigDump { // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. - envoy.api.v2.auth.Secret secret = 4; + api.v2.auth.Secret secret = 4; } // StaticSecret specifies statically loaded secret in bootstrap. @@ -248,7 +248,7 @@ message SecretsConfigDump { // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. - envoy.api.v2.auth.Secret secret = 3; + api.v2.auth.Secret secret = 3; } // The statically loaded secrets. 
diff --git a/api/envoy/admin/v2alpha/listeners.proto b/api/envoy/admin/v2alpha/listeners.proto index 87a4e1d9f739..e84f64540857 100644 --- a/api/envoy/admin/v2alpha/listeners.proto +++ b/api/envoy/admin/v2alpha/listeners.proto @@ -24,5 +24,5 @@ message ListenerStatus { // The actual local address that the listener is listening on. If a listener was configured // to listen on port 0, then this address has the port that was allocated by the OS. - envoy.api.v2.core.Address local_address = 2; + api.v2.core.Address local_address = 2; } diff --git a/api/envoy/admin/v2alpha/memory.proto b/api/envoy/admin/v2alpha/memory.proto index d86e44881056..6173b33cd455 100644 --- a/api/envoy/admin/v2alpha/memory.proto +++ b/api/envoy/admin/v2alpha/memory.proto @@ -12,7 +12,6 @@ option java_package = "io.envoyproxy.envoy.admin.v2alpha"; // values extracted from an internal TCMalloc instance. For more information, see the section of the // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). message Memory { - // The number of bytes allocated by the heap for Envoy. This is an alias for // `generic.current_allocated_bytes`. uint64 allocated = 1; diff --git a/api/envoy/admin/v2alpha/mutex_stats.proto b/api/envoy/admin/v2alpha/mutex_stats.proto index 272d7224b0e9..682ff5b49354 100644 --- a/api/envoy/admin/v2alpha/mutex_stats.proto +++ b/api/envoy/admin/v2alpha/mutex_stats.proto @@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.admin.v2alpha"; // correspond to core clock frequency. For more information, see the `CycleClock` // [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). message MutexStats { - // The number of individual mutex contentions which have occurred since startup. 
uint64 num_contentions = 1; diff --git a/api/envoy/admin/v2alpha/server_info.proto b/api/envoy/admin/v2alpha/server_info.proto index 78cc6fa7020a..c2b44c06ca49 100644 --- a/api/envoy/admin/v2alpha/server_info.proto +++ b/api/envoy/admin/v2alpha/server_info.proto @@ -13,20 +13,23 @@ import "google/protobuf/duration.proto"; // Proto representation of the value returned by /server_info, containing // server version/server status information. message ServerInfo { - // Server version. - string version = 1; - enum State { // Server is live and serving traffic. LIVE = 0; + // Server is draining listeners in response to external health checks failing. DRAINING = 1; + // Server has not yet completed cluster manager initialization. PRE_INITIALIZING = 2; + // Server is running the cluster manager initialization callbacks (e.g., RDS). INITIALIZING = 3; } + // Server version. + string version = 1; + // State of the server. State state = 2; @@ -44,6 +47,24 @@ message ServerInfo { } message CommandLineOptions { + enum IpVersion { + v4 = 0; + v6 = 1; + } + + enum Mode { + // Validate configs and then serve traffic normally. + Serve = 0; + + // Validate configs and exit. + Validate = 1; + + // Completely load and initialize the config, and then exit without running the listener loop. + InitOnly = 2; + } + + reserved 12; + // See :option:`--base-id` for details. uint64 base_id = 1; @@ -65,11 +86,6 @@ message CommandLineOptions { // See :option:`--admin-address-path` for details. string admin_address_path = 6; - enum IpVersion { - v4 = 0; - v6 = 1; - } - // See :option:`--local-address-ip-version` for details. IpVersion local_address_ip_version = 7; @@ -85,8 +101,6 @@ message CommandLineOptions { // See :option:`--log-path` for details. string log_path = 11; - reserved 12; - // See :option:`--service-cluster` for details. string service_cluster = 13; @@ -105,22 +119,12 @@ message CommandLineOptions { // See :option:`--parent-shutdown-time-s` for details. 
google.protobuf.Duration parent_shutdown_time = 18; - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - // See :option:`--mode` for details. Mode mode = 19; // max_stats and max_obj_name_len are now unused and have no effect. uint64 max_stats = 20 [deprecated = true]; + uint64 max_obj_name_len = 21 [deprecated = true]; // See :option:`--disable-hot-restart` for details. diff --git a/api/envoy/admin/v2alpha/tap.proto b/api/envoy/admin/v2alpha/tap.proto index 789be14b01c3..d7caf609af52 100644 --- a/api/envoy/admin/v2alpha/tap.proto +++ b/api/envoy/admin/v2alpha/tap.proto @@ -1,20 +1,21 @@ syntax = "proto3"; -import "envoy/service/tap/v2alpha/common.proto"; -import "validate/validate.proto"; - package envoy.admin.v2alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; +import "envoy/service/tap/v2alpha/common.proto"; + +import "validate/validate.proto"; + // The /tap admin request body that is used to configure an active tap session. message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string.min_bytes = 1]; + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. 
- service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message.required = true]; + service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/admin/v3alpha/certs.proto b/api/envoy/admin/v3alpha/certs.proto index e34fd36d992b..788e49f59e37 100644 --- a/api/envoy/admin/v3alpha/certs.proto +++ b/api/envoy/admin/v3alpha/certs.proto @@ -19,7 +19,6 @@ message Certificates { } message Certificate { - // Details of CA certificate. repeated CertificateDetails ca_cert = 1; @@ -48,10 +47,10 @@ message CertificateDetails { } message SubjectAlternateName { - // Subject Alternate Name. oneof name { string dns = 1; + string uri = 2; } } diff --git a/api/envoy/admin/v3alpha/clusters.proto b/api/envoy/admin/v3alpha/clusters.proto index 093448d9f82c..2fc7335924fa 100644 --- a/api/envoy/admin/v3alpha/clusters.proto +++ b/api/envoy/admin/v3alpha/clusters.proto @@ -45,7 +45,7 @@ message ClusterStatus { // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. - envoy.type.Percent success_rate_ejection_threshold = 3; + type.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. repeated HostStatus host_statuses = 4; @@ -65,13 +65,13 @@ message ClusterStatus { // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. - envoy.type.Percent local_origin_success_rate_ejection_threshold = 5; + type.Percent local_origin_success_rate_ejection_threshold = 5; } // Current state of a particular host. message HostStatus { // Address of this host. - envoy.api.v3alpha.core.Address address = 1; + api.v3alpha.core.Address address = 1; // List of stats specific to this host. 
repeated SimpleMetric stats = 2; @@ -92,7 +92,7 @@ message HostStatus { // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. - envoy.type.Percent success_rate = 4; + type.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. uint32 weight = 5; @@ -115,7 +115,7 @@ message HostStatus { // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. - envoy.type.Percent local_origin_success_rate = 8; + type.Percent local_origin_success_rate = 8; } // Health status for a host. @@ -139,5 +139,5 @@ message HostHealthStatus { // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // TODO(mrice32): pipe through remaining EDS health status possibilities. - envoy.api.v3alpha.core.HealthStatus eds_health_status = 3; + api.v3alpha.core.HealthStatus eds_health_status = 3; } diff --git a/api/envoy/admin/v3alpha/config_dump.proto b/api/envoy/admin/v3alpha/config_dump.proto index cc6afbdbadf9..6e1c624c48f2 100644 --- a/api/envoy/admin/v3alpha/config_dump.proto +++ b/api/envoy/admin/v3alpha/config_dump.proto @@ -39,7 +39,7 @@ message ConfigDump { // the static portions of an Envoy configuration by reusing the output as the bootstrap // configuration for another Envoy. message BootstrapConfigDump { - envoy.config.bootstrap.v3alpha.Bootstrap bootstrap = 1; + config.bootstrap.v3alpha.Bootstrap bootstrap = 1; // The timestamp when the BootstrapConfig was last updated. 
google.protobuf.Timestamp last_updated = 2; @@ -49,15 +49,10 @@ message BootstrapConfigDump { // configuration information can be used to recreate an Envoy configuration by populating all // listeners as static listeners or by returning them in a LDS response. message ListenersConfigDump { - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - // Describes a statically loaded listener. message StaticListener { // The listener config. - envoy.api.v3alpha.Listener listener = 1; + api.v3alpha.Listener listener = 1; // The timestamp when the Listener was last updated. google.protobuf.Timestamp last_updated = 2; @@ -72,12 +67,17 @@ message ListenersConfigDump { string version_info = 1; // The listener config. - envoy.api.v3alpha.Listener listener = 2; + api.v3alpha.Listener listener = 2; // The timestamp when the Listener was last updated. google.protobuf.Timestamp last_updated = 3; } + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + string version_info = 1; + // The statically loaded listener configs. repeated StaticListener static_listeners = 2; @@ -102,15 +102,10 @@ message ListenersConfigDump { // configuration information can be used to recreate an Envoy configuration by populating all // clusters as static clusters or by returning them in a CDS response. message ClustersConfigDump { - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - // Describes a statically loaded cluster. message StaticCluster { // The cluster config. - envoy.api.v3alpha.Cluster cluster = 1; + api.v3alpha.Cluster cluster = 1; // The timestamp when the Cluster was last updated. 
google.protobuf.Timestamp last_updated = 2; @@ -125,12 +120,17 @@ message ClustersConfigDump { string version_info = 1; // The cluster config. - envoy.api.v3alpha.Cluster cluster = 2; + api.v3alpha.Cluster cluster = 2; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 3; } + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + string version_info = 1; + // The statically loaded cluster configs. repeated StaticCluster static_clusters = 2; @@ -153,7 +153,7 @@ message ClustersConfigDump { message RoutesConfigDump { message StaticRouteConfig { // The route config. - envoy.api.v3alpha.RouteConfiguration route_config = 1; + api.v3alpha.RouteConfiguration route_config = 1; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 2; @@ -166,7 +166,7 @@ message RoutesConfigDump { string version_info = 1; // The route config. - envoy.api.v3alpha.RouteConfiguration route_config = 2; + api.v3alpha.RouteConfiguration route_config = 2; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 3; @@ -189,7 +189,7 @@ message ScopedRoutesConfigDump { string name = 1; // The scoped route configurations. - repeated envoy.api.v3alpha.ScopedRouteConfiguration scoped_route_configs = 2; + repeated api.v3alpha.ScopedRouteConfiguration scoped_route_configs = 2; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 3; @@ -205,7 +205,7 @@ message ScopedRoutesConfigDump { string version_info = 2; // The scoped route configurations. - repeated envoy.api.v3alpha.ScopedRouteConfiguration scoped_route_configs = 3; + repeated api.v3alpha.ScopedRouteConfiguration scoped_route_configs = 3; // The timestamp when the scoped route config set was last updated. 
google.protobuf.Timestamp last_updated = 4; @@ -234,7 +234,7 @@ message SecretsConfigDump { // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. - envoy.api.v3alpha.auth.Secret secret = 4; + api.v3alpha.auth.Secret secret = 4; } // StaticSecret specifies statically loaded secret in bootstrap. @@ -248,7 +248,7 @@ message SecretsConfigDump { // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. - envoy.api.v3alpha.auth.Secret secret = 3; + api.v3alpha.auth.Secret secret = 3; } // The statically loaded secrets. diff --git a/api/envoy/admin/v3alpha/listeners.proto b/api/envoy/admin/v3alpha/listeners.proto index 5e4d121b1fb2..7fc998f47fbd 100644 --- a/api/envoy/admin/v3alpha/listeners.proto +++ b/api/envoy/admin/v3alpha/listeners.proto @@ -24,5 +24,5 @@ message ListenerStatus { // The actual local address that the listener is listening on. If a listener was configured // to listen on port 0, then this address has the port that was allocated by the OS. - envoy.api.v3alpha.core.Address local_address = 2; + api.v3alpha.core.Address local_address = 2; } diff --git a/api/envoy/admin/v3alpha/memory.proto b/api/envoy/admin/v3alpha/memory.proto index 4c17be034e47..e6e057a53777 100644 --- a/api/envoy/admin/v3alpha/memory.proto +++ b/api/envoy/admin/v3alpha/memory.proto @@ -12,7 +12,6 @@ option java_package = "io.envoyproxy.envoy.admin.v3alpha"; // values extracted from an internal TCMalloc instance. For more information, see the section of the // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). message Memory { - // The number of bytes allocated by the heap for Envoy. This is an alias for // `generic.current_allocated_bytes`. 
uint64 allocated = 1; diff --git a/api/envoy/admin/v3alpha/mutex_stats.proto b/api/envoy/admin/v3alpha/mutex_stats.proto index 72350dea8d77..8d53ec97919a 100644 --- a/api/envoy/admin/v3alpha/mutex_stats.proto +++ b/api/envoy/admin/v3alpha/mutex_stats.proto @@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.admin.v3alpha"; // correspond to core clock frequency. For more information, see the `CycleClock` // [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). message MutexStats { - // The number of individual mutex contentions which have occurred since startup. uint64 num_contentions = 1; diff --git a/api/envoy/admin/v3alpha/server_info.proto b/api/envoy/admin/v3alpha/server_info.proto index a259a87f1651..34fe2e0f8d74 100644 --- a/api/envoy/admin/v3alpha/server_info.proto +++ b/api/envoy/admin/v3alpha/server_info.proto @@ -13,20 +13,23 @@ import "google/protobuf/duration.proto"; // Proto representation of the value returned by /server_info, containing // server version/server status information. message ServerInfo { - // Server version. - string version = 1; - enum State { // Server is live and serving traffic. LIVE = 0; + // Server is draining listeners in response to external health checks failing. DRAINING = 1; + // Server has not yet completed cluster manager initialization. PRE_INITIALIZING = 2; + // Server is running the cluster manager initialization callbacks (e.g., RDS). INITIALIZING = 3; } + // Server version. + string version = 1; + // State of the server. State state = 2; @@ -44,6 +47,24 @@ message ServerInfo { } message CommandLineOptions { + enum IpVersion { + v4 = 0; + v6 = 1; + } + + enum Mode { + // Validate configs and then serve traffic normally. + Serve = 0; + + // Validate configs and exit. + Validate = 1; + + // Completely load and initialize the config, and then exit without running the listener loop. + InitOnly = 2; + } + + reserved 12; + // See :option:`--base-id` for details. 
uint64 base_id = 1; @@ -65,11 +86,6 @@ message CommandLineOptions { // See :option:`--admin-address-path` for details. string admin_address_path = 6; - enum IpVersion { - v4 = 0; - v6 = 1; - } - // See :option:`--local-address-ip-version` for details. IpVersion local_address_ip_version = 7; @@ -85,8 +101,6 @@ message CommandLineOptions { // See :option:`--log-path` for details. string log_path = 11; - reserved 12; - // See :option:`--service-cluster` for details. string service_cluster = 13; @@ -105,22 +119,12 @@ message CommandLineOptions { // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - // See :option:`--mode` for details. Mode mode = 19; // max_stats and max_obj_name_len are now unused and have no effect. uint64 max_stats = 20 [deprecated = true]; + uint64 max_obj_name_len = 21 [deprecated = true]; // See :option:`--disable-hot-restart` for details. diff --git a/api/envoy/admin/v3alpha/tap.proto b/api/envoy/admin/v3alpha/tap.proto index b6fd6a85f567..7fbf3f905a60 100644 --- a/api/envoy/admin/v3alpha/tap.proto +++ b/api/envoy/admin/v3alpha/tap.proto @@ -1,20 +1,21 @@ syntax = "proto3"; -import "envoy/service/tap/v3alpha/common.proto"; -import "validate/validate.proto"; - package envoy.admin.v3alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.admin.v3alpha"; +import "envoy/service/tap/v3alpha/common.proto"; + +import "validate/validate.proto"; + // The /tap admin request body that is used to configure an active tap session. message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. 
// A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string.min_bytes = 1]; + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. - service.tap.v3alpha.TapConfig tap_config = 2 [(validate.rules).message.required = true]; + service.tap.v3alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD index 7e6536dc13ab..9bffa27360ee 100644 --- a/api/envoy/api/v2/BUILD +++ b/api/envoy/api/v2/BUILD @@ -27,6 +27,7 @@ api_proto_package( "//envoy/api/v2/listener:pkg", "//envoy/api/v2/ratelimit:pkg", "//envoy/api/v2/route:pkg", + "//envoy/config/listener/v2:pkg", "//envoy/type", ], ) @@ -87,6 +88,7 @@ api_proto_library_internal( "//envoy/api/v2/core:base", "//envoy/api/v2/listener", "//envoy/api/v2/listener:udp_listener_config", + "//envoy/config/listener/v2:api_listener", ], ) diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index 0f331268205f..1710b57f7af2 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ -36,11 +36,11 @@ message TlsParameters { } // Minimum TLS protocol version. By default, it's ``TLSv1_0``. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum.defined_only = true]; + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. 
- TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum.defined_only = true]; + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list // `_ @@ -106,7 +106,7 @@ message TlsParameters { message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string.min_bytes = 1]; + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -167,7 +167,7 @@ message TlsSessionTicketKeys { // * Keep the session ticket keys at least as secure as your TLS certificate private keys // * Rotate session ticket keys at least daily, and preferably hourly // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 [(validate.rules).repeated .min_items = 1]; + repeated core.DataSource keys = 1 [(validate.rules).repeated = {min_items: 1}]; } message CertificateValidationContext { @@ -201,9 +201,9 @@ message CertificateValidationContext { // // .. code-block:: bash // - // $ openssl x509 -in path/to/client.crt -noout -pubkey \ - // | openssl pkey -pubin -outform DER \ - // | openssl dgst -sha256 -binary \ + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary // | openssl enc -base64 // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= // @@ -223,7 +223,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. 
repeated string verify_certificate_spki = 3 - [(validate.rules).repeated .items.string = {min_bytes: 44, max_bytes: 44}]; + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -252,7 +252,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated .items.string = {min_bytes: 64, max_bytes: 95}]; + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative Names. If specified, Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified values. @@ -283,6 +283,18 @@ message CertificateValidationContext { // TLS context shared by both client and server TLS contexts. message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + // TLS protocol versions, cipher suites etc. TlsParameters tls_params = 1; @@ -296,17 +308,7 @@ message CommonTlsContext { // Configs for fetching TLS certificates via SDS API. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated .max_items = 1]; - - message CombinedCertificateValidationContext { - // How to validate peer certificates. 
- CertificateValidationContext default_validation_context = 1 - [(validate.rules).message.required = true]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message.required = true]; - }; + [(validate.rules).repeated = {max_items: 1}]; oneof validation_context_type { // How to validate peer certificates. @@ -336,8 +338,6 @@ message CommonTlsContext { // // There is no default for this parameter. If empty, Envoy will not expose ALPN. repeated string alpn_protocols = 4; - - reserved 5; } message UpstreamTlsContext { @@ -345,7 +345,7 @@ message UpstreamTlsContext { CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string.max_bytes = 255]; + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; // If true, server-initiated TLS renegotiation will be allowed. // @@ -386,8 +386,10 @@ message DownstreamTlsContext { message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via SDS. - // When only name is specified, then secret will be loaded from static resources [V2-API-DIFF]. + // When only name is specified, then secret will be loaded from static + // resources. string name = 1; + core.ConfigSource sds_config = 2; } @@ -395,9 +397,12 @@ message SdsSecretConfig { message Secret { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
string name = 1; + oneof type { TlsCertificate tls_certificate = 2; + TlsSessionTicketKeys session_ticket_keys = 3; + CertificateValidationContext validation_context = 4; } } diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index 85e9a3827c3c..719fe0f6ab91 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -5,19 +5,18 @@ package envoy.api.v2; option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; -import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/cluster/circuit_breaker.proto"; +import "envoy/api/v2/cluster/filter.proto"; +import "envoy/api/v2/cluster/outlier_detection.proto"; +import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/core/health_check.proto"; import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/cluster/circuit_breaker.proto"; -import "envoy/api/v2/cluster/filter.proto"; -import "envoy/api/v2/cluster/outlier_detection.proto"; +import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/eds.proto"; import "envoy/type/percent.proto"; @@ -29,6 +28,8 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: Clusters] + // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { @@ -45,24 +46,9 @@ service ClusterDiscoveryService { } } -// [#protodoc-title: Clusters] - // Configuration for a single upstream cluster. -// [#comment:next free field: 42] +// [#comment:next free field: 44] message Cluster { - // Supplies the name of the cluster which must be unique across all clusters. 
- // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // An optional alternative to the cluster name to be used while emitting stats. - // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - // confused with :ref:`Router Filter Header - // `. - string alt_stat_name = 28; - // Refer to :ref:`service discovery type ` // for an explanation on each type. enum DiscoveryType { @@ -90,45 +76,6 @@ message Cluster { ORIGINAL_DST = 4; } - // Extended cluster type. - message CustomClusterType { - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - google.protobuf.Any typed_config = 2; - } - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - // Configuration for the source of EDS updates for this Cluster. - core.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; - } - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. 
- google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration.gt = {}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - // Refer to :ref:`load balancer type ` architecture // overview section for information on each type. enum LbPolicy { @@ -178,110 +125,6 @@ message Cluster { // configuring this.] LOAD_BALANCING_POLICY_CONFIG = 7; } - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. - // - repeated core.Address hosts = 7; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes :ref:`hosts` field. - // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` - // once load_assignment is implemented.] - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // Setting this overrides :ref:`hosts` values. - // - ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. 
If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; - - // Optional :ref:`circuit breaking ` for the cluster. - cluster.CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. If no TLS - // configuration is specified, TLS will not be used for new connections. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - auth.UpstreamTlsContext tls_context = 11; - - reserved 12; - - // Additional options when handling HTTP requests. These options will be applicable to both - // HTTP1 and HTTP2 requests. - core.HttpProtocolOptions common_http_protocol_options = 29; - - // Additional options when handling HTTP1 requests. - core.Http1ProtocolOptions http_protocol_options = 13; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - core.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map extension_protocol_options = 35; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. 
The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map typed_extension_protocol_options = 36; - - reserved 15; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. If this setting is not specified, the value defaults to 5000ms. For - // cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration.gt = {}]; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; // When V4_ONLY is selected, the DNS resolver will only perform a lookup for // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will @@ -299,52 +142,57 @@ message Cluster { V6_ONLY = 2; } - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true]; + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. 
For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - repeated core.Address dns_resolvers = 18; + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - cluster.OutlierDetection outlier_detection = 19; + // TransportSocketMatch specifies what transport socket config will be used + // when the match conditions are satisfied. + message TransportSocketMatch { + // The name of the match, used in stats generation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Optional endpoint metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in *envoy.transport_socket* is used to match + // against the values specified in this field. + google.protobuf.Struct match = 2; + + // The configuration of the transport socket. + core.TransportSocket transport_socket = 3; + } - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. 
For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration.gt = {}]; + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.BindConfig upstream_bind_config = 21; + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + // Configuration for the source of EDS updates for this Cluster. + core.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. message LbSubsetConfig { - // If NO_FALLBACK is selected, a result // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, // any cluster endpoint may be returned (subject to policy, health checks, @@ -356,45 +204,49 @@ message Cluster { DEFAULT_SUBSET = 2; } - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. 
- // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - // Specifications for subsets. message LbSubsetSelector { - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum.defined_only = true]; - // Allows to override top level fallback policy per selector. enum LbSubsetSelectorFallbackPolicy { // If NOT_DEFINED top level config fallback policy is used instead. NOT_DEFINED = 0; + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. NO_FALLBACK = 1; + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned // (subject to policy, health checks, etc). ANY_ENDPOINT = 2; + // If DEFAULT_SUBSET is selected, load balancing is performed over the // endpoints matching the values from the default_subset field. DEFAULT_SUBSET = 3; } + + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; } + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. 
It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + // For each entry, LbEndpoint.Metadata's // *envoy.lb* namespace is traversed and a subset is created for each unique // combination of key and value. For example: @@ -443,45 +295,43 @@ message Cluster { bool list_as_any = 7; } - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - // Specific configuration for the LeastRequest load balancing policy. message LeastRequestLbConfig { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32.gte = 2]; + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; } // Specific configuration for the :ref:`RingHash` // load balancing policy. message RingHashLbConfig { - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608]; - - reserved 2; - // The hash function used to hash hosts onto the ketama ring. enum HashFunction { // Use `xxHash `_, this is the default hash function. XX_HASH = 0; + // Use `MurmurHash2 `_, this is compatible with // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled // on Linux and not macOS. MURMUR_HASH_2 = 1; } + reserved 2; + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. 
Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; + // The hash function used to hash hosts onto the ketama ring. The value defaults to // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true]; + HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered // to further constrain resource use. See also // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608]; + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; } // Specific configuration for the @@ -500,31 +350,8 @@ message Cluster { bool use_http_header = 1; } - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - // Common configuration for all load balancer implementations. message CommonLbConfig { - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. 
- envoy.type.Percent healthy_panic_threshold = 1; // Configuration for :ref:`zone aware routing // `. message ZoneAwareLbConfig { @@ -532,7 +359,8 @@ message Cluster { // if zone aware routing is configured. If not specified, the default is 100%. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. - envoy.type.Percent routing_enabled = 1; + type.Percent routing_enabled = 1; + // Configures minimum upstream cluster size required for zone aware routing // If upstream cluster size is less than specified, zone aware routing is not performed // even if zone aware routing is configured. If not specified, the default is 6. @@ -546,14 +374,26 @@ message Cluster { // failing service. bool fail_traffic_on_panic = 3; } + // Configuration for :ref:`locality weighted load balancing // ` message LocalityWeightedLbConfig { } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. + type.Percent healthy_panic_threshold = 1; + oneof locality_config_specifier { ZoneAwareLbConfig zone_aware_lb_config = 2; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; } + // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when // the first update happens. This is useful for big clusters, with potentially noisy deploys @@ -596,6 +436,252 @@ message Cluster { bool close_connections_on_host_set_change = 6; } + reserved 12, 15; + + // Configuration to use different transport sockets for different endpoints. + // The entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata ` + // is used to match against the transport sockets as they appear in the list. The first + // :ref:`match ` is used. + // For example, with the following match + // + // .. 
code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: "rawbuffer" + // + // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. + // + // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or + // *transport_socket* specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // *TransportSocketMatch* in this field. Other client Envoys receive CDS without + // *transport_socket_match* set, and still send plain text traffic to the same cluster. + // + // TODO(incfly): add a detailed architecture doc on intended usage. + // [#not-implemented-hide:] + repeated TransportSocketMatch transport_socket_matches = 43; + + // Supplies the name of the cluster which must be unique across all clusters. 
+ // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // If the service discovery type is + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS`, + // then hosts is required. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`load_assignment` field instead. + // + repeated core.Address hosts = 7; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes :ref:`hosts` field. 
+ // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` + // once load_assignment is implemented.] + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // Setting this overrides :ref:`hosts` values. + // + ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + cluster.CircuitBreakers circuit_breakers = 10; + + // The TLS configuration for connections to the upstream cluster. If no TLS + // configuration is specified, TLS will not be used for new connections. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + auth.UpstreamTlsContext tls_context = 11; + + // Additional options when handling HTTP requests. These options will be applicable to both + // HTTP1 and HTTP2 requests. + core.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. 
Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map extension_protocol_options = 35; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map typed_extension_protocol_options = 36; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. If this setting is not specified, the value defaults to 5000ms. For + // cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration = {gt {}}]; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. 
+ DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + repeated core.Address dns_resolvers = 18; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + cluster.OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. 
+ LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + // Common configuration for all load balancer implementations. CommonLbConfig common_lb_config = 27; @@ -609,20 +695,11 @@ message Cluster { // the Router filter, the filter name should be specified as *envoy.router*. core.Metadata metadata = 25; - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - // Determines how Envoy selects the protocol used to speak to upstream hosts. ClusterProtocolSelection protocol_selection = 26; // Optional options for upstream connections. - envoy.api.v2.UpstreamConnectionOptions upstream_connection_options = 30; + UpstreamConnectionOptions upstream_connection_options = 30; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. 
@@ -656,6 +733,22 @@ message Cluster { // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. LoadBalancingPolicy load_balancing_policy = 41; + + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.ConfigSource lrs_server = 42; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -683,11 +776,14 @@ message LoadBalancingPolicy { message Policy { // Required. The name of the LB policy. string name = 1; + // Optional config for the LB policy. // No more than one of these two fields may be populated. google.protobuf.Struct config = 2; + google.protobuf.Any typed_config = 3; } + // Each client will iterate over the list in order and stop at the first policy that it // supports. This provides a mechanism for starting to use new LB policies that are not yet // supported by all clients. 
diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto index e36677c89b64..d2e0a328e49f 100644 --- a/api/envoy/api/v2/cluster/circuit_breaker.proto +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -17,7 +17,6 @@ import "google/protobuf/wrappers.proto"; // :ref:`Circuit breaking` settings can be // specified individually for each defined priority. message CircuitBreakers { - // A Thresholds defines CircuitBreaker settings for a // :ref:`RoutingPriority`. message Thresholds { diff --git a/api/envoy/api/v2/cluster/filter.proto b/api/envoy/api/v2/cluster/filter.proto index 94c683913953..b89b2a6b778b 100644 --- a/api/envoy/api/v2/cluster/filter.proto +++ b/api/envoy/api/v2/cluster/filter.proto @@ -18,7 +18,7 @@ import "validate/validate.proto"; message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto index d457c8165f49..4702bd0a6f1c 100644 --- a/api/envoy/api/v2/cluster/outlier_detection.proto +++ b/api/envoy/api/v2/cluster/outlier_detection.proto @@ -26,26 +26,26 @@ message OutlierDetection { // The time interval between ejection analysis sweeps. This can result in // both new ejections as well as hosts being returned to service. Defaults // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. The real time is equal to the // base time multiplied by the number of times the host has been ejected. // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; // The maximum % of an upstream cluster that can be ejected due to outlier // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through consecutive 5xx. This setting can be used to disable // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics. This setting can be used to // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; // The number of hosts in a cluster that must have enough request volume to // detect success rate outliers. If the number of hosts is less than this @@ -77,7 +77,7 @@ message OutlierDetection { // is detected through consecutive gateway failures. This setting can be // used to disable ejection or to ramp it up slowly. Defaults to 0. google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // Determines whether to distinguish local origin failures from external errors. 
If set to true // the following configuration parameters are taken into account: @@ -101,7 +101,7 @@ message OutlierDetection { // :ref:`split_external_local_origin_errors` // is set to true. google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics for locally originated errors. @@ -110,12 +110,13 @@ message OutlierDetection { // :ref:`split_external_local_origin_errors` // is set to true. google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The failure percentage to use when determining failure percentage-based outlier detection. If // the failure percentage of a given host is greater than or equal to this value, it will be // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value failure_percentage_threshold = 16 + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status is detected through // failure percentage statistics. This setting can be used to disable ejection or to ramp it up @@ -123,13 +124,14 @@ message OutlierDetection { // // [#next-major-version: setting this without setting failure_percentage_threshold should be // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_failure_percentage = 17 + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status is detected through // local-origin failure percentage statistics. This setting can be used to disable ejection or to // ramp it up slowly. Defaults to 0. 
google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. // If the total number of hosts in the cluster is less than this value, failure percentage-based diff --git a/api/envoy/api/v2/core/address.proto b/api/envoy/api/v2/core/address.proto index 362395577fc9..89fd0adb1eb6 100644 --- a/api/envoy/api/v2/core/address.proto +++ b/api/envoy/api/v2/core/address.proto @@ -19,16 +19,19 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. - string path = 1 [(validate.rules).string.min_bytes = 1]; + string path = 1 [(validate.rules).string = {min_bytes: 1}]; } message SocketAddress { enum Protocol { TCP = 0; + // [#not-implemented-hide:] UDP = 1; } - Protocol protocol = 1 [(validate.rules).enum.defined_only = true]; + + Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; + // The address for this socket. :ref:`Listeners ` will bind // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: @@ -40,15 +43,19 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. 
- string address = 2 [(validate.rules).string.min_bytes = 1]; + string address = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof port_specifier { option (validate.required) = true; - uint32 port_value = 3 [(validate.rules).uint32.lte = 65535]; + + uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; + // This is only valid if :ref:`resolver_name // ` is specified below and the // named resolver is capable of named port resolution. string named_port = 4; } + // The name of the custom resolver. This must have been registered with Envoy. If // this is empty, a context dependent default applies. If the address is a concrete // IP address, no resolution will occur. If address is a hostname this @@ -68,10 +75,12 @@ message TcpKeepalive { // the connection is dead. Default is to use the OS level configuration (unless // overridden, Linux defaults to 9.) google.protobuf.UInt32Value keepalive_probes = 1; + // The number of seconds a connection needs to be idle before keep-alive probes // start being sent. Default is to use the OS level configuration (unless // overridden, Linux defaults to 7200s (ie 2 hours.) google.protobuf.UInt32Value keepalive_time = 2; + // The number of seconds between keep-alive probes. Default is to use the OS // level configuration (unless overridden, Linux defaults to 75s.) google.protobuf.UInt32Value keepalive_interval = 3; @@ -79,7 +88,7 @@ message TcpKeepalive { message BindConfig { // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message.required = true]; + SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; // Whether to set the *IP_FREEBIND* option when creating the socket. When this // flag is set to true, allows the :ref:`source_address @@ -103,6 +112,7 @@ message Address { option (validate.required) = true; SocketAddress socket_address = 1; + Pipe pipe = 2; } } @@ -111,7 +121,8 @@ message Address { // the subnet mask for a `CIDR `_ range. 
message CidrRange { // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + // Length of prefix, e.g. 0, 32. - google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32.lte = 128]; + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; } diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index 2a778f19afb1..eca007605750 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.core"; import "envoy/api/v2/core/http_uri.proto"; +import "envoy/type/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; @@ -14,10 +15,46 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -import "envoy/type/percent.proto"; - // [#protodoc-title: Common types] +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; + PATCH = 9; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. 
+ INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} + // Identifies location of where either Envoy runs or where upstream hosts run. message Locality { // Region this :ref:`zone ` belongs to. @@ -110,52 +147,26 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string.min_bytes = 1]; -} - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. -enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; + string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } // Header name/value pair. message HeaderValue { // Header name. - string key = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 16384}]; + string key = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 16384}]; // Header value. // // The same :ref:`format specifier ` as used for // :ref:`HTTP access logging ` applies here, however // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [(validate.rules).string.max_bytes = 16384]; + string value = 2 [(validate.rules).string = {max_bytes: 16384}]; } // Header name/value pair plus option to control append behavior. message HeaderValueOption { // Header name/value pair that this option applies to. 
- HeaderValue header = 1 [(validate.rules).message.required = true]; + HeaderValue header = 1 [(validate.rules).message = {required: true}]; // Should the value be appended? If true (default), the value is appended to // existing values. @@ -173,23 +184,23 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string.min_bytes = 1]; + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes.min_len = 1]; + bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string.min_bytes = 1]; + string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; } } // The message specifies how to fetch data from remote and how to verify it. message RemoteDataSource { // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message.required = true]; + HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string.min_bytes = 1]; + string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; } // Async data source which support async data fetch. @@ -212,7 +223,7 @@ message AsyncDataSource { message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. @@ -226,39 +237,47 @@ message TransportSocket { // Generic socket option message. 
This would be used to set socket options that // might not exist in upstream kernels or precompiled Envoy binaries. message SocketOption { + enum SocketState { + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + // An optional name to give this socket option for debugging, etc. // Uniqueness is not required and no special meaning is assumed. string description = 1; + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP int64 level = 2; + // The numeric name as passed to setsockopt int64 name = 3; + oneof value { option (validate.required) = true; // Because many sockopts take an int value. int64 int_value = 4; + // Otherwise it's a byte buffer. bytes buf_value = 5; } - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } + // The state in which the option will be applied. When used in BindConfig // STATE_PREBIND is currently the only valid value. - SocketState state = 6 [(validate.rules).enum.defined_only = true]; + SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } // Runtime derived FractionalPercent with defaults for when the numerator or denominator is not // specified via a runtime key. message RuntimeFractionalPercent { // Default value if the runtime value's for the numerator/denominator keys are not available. 
- envoy.type.FractionalPercent default_value = 1 [(validate.rules).message.required = true]; + type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; // Runtime key for a YAML representation of a FractionalPercent. string runtime_key = 2; @@ -271,15 +290,3 @@ message ControlPlane { // the Envoy is connected to. string identifier = 1; } - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index d86a2104f7d0..240d37b81ee2 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -23,12 +23,15 @@ message ApiConfigSource { // Ideally this would be 'reserved 0' but one can't reserve the default // value. Instead we throw an exception if this is ever used. UNSUPPORTED_REST_LEGACY = 0 [deprecated = true]; + // REST-JSON v2 API. The `canonical JSON encoding // `_ for // the v2 protos is used. REST = 1; + // gRPC v2 API. GRPC = 2; + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. @@ -37,7 +40,9 @@ message ApiConfigSource { // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. DELTA_GRPC = 3; } - ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; + + ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; + // Cluster names should be used only with REST. If > 1 // cluster is defined, clusters will be cycled through if any kind of failure // occurs. 
@@ -56,7 +61,7 @@ message ApiConfigSource { google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration.gt.seconds = 0]; + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. @@ -72,6 +77,13 @@ message ApiConfigSource { message AggregatedConfigSource { } +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +message SelfConfigSource { +} + // Rate Limit settings to be applied for discovery requests made by Envoy. message RateLimitSettings { // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a @@ -80,7 +92,7 @@ message RateLimitSettings { // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double.gt = 0.0]; + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; } // Configuration for :ref:`listeners `, :ref:`clusters @@ -89,9 +101,11 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. +// [#comment:next free field: 6] message ConfigSource { oneof config_source_specifier { option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. // // .. note:: @@ -104,11 +118,26 @@ message ConfigSource { // are atomic. The same method of swapping files as is demonstrated in the // :ref:`runtime documentation ` can be used here also. 
string path = 1; + // API configuration source. ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. AggregatedConfigSource ads = 3; + + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] + SelfConfigSource self = 5; } // When this timeout is specified, Envoy will wait no longer than the specified time for first diff --git a/api/envoy/api/v2/core/grpc_service.proto b/api/envoy/api/v2/core/grpc_service.proto index 705c61f5a133..acd86c36da02 100644 --- a/api/envoy/api/v2/core/grpc_service.proto +++ b/api/envoy/api/v2/core/grpc_service.proto @@ -10,8 +10,8 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; import "validate/validate.proto"; @@ -24,16 +24,11 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`tls_context // `. 
- string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#proto-status: draft] message GoogleGrpc { - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string.min_bytes = 1]; - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. message SslCredentials { // PEM encoded server root certificates. @@ -56,6 +51,7 @@ message GrpcService { message ChannelCredentials { oneof credential_specifier { option (validate.required) = true; + SslCredentials ssl_credentials = 1; // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 @@ -65,21 +61,22 @@ message GrpcService { } } - ChannelCredentials channel_credentials = 2; - message CallCredentials { message ServiceAccountJWTAccessCredentials { string json_key = 1; + uint64 token_lifetime_seconds = 2; } message GoogleIAMCredentials { string authorization_token = 1; + string authority_selector = 2; } message MetadataCredentialsFromPlugin { string name = 1; + oneof config_type { google.protobuf.Struct config = 2; @@ -117,6 +114,13 @@ message GrpcService { } } + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + ChannelCredentials channel_credentials = 2; + // A set of call credentials that can be composed with `channel credentials // `_. 
repeated CallCredentials call_credentials = 3; @@ -130,7 +134,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -142,6 +146,8 @@ message GrpcService { google.protobuf.Struct config = 6; } + reserved 4; + oneof target_specifier { option (validate.required) = true; @@ -160,9 +166,6 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - // Field 4 reserved due to moving credentials inside the GoogleGrpc message - reserved 4; - // Additional metadata to include in streams initiated to the GrpcService. // This can be used for scenarios in which additional ad hoc authorization // headers (e.g. `x-foo-bar: baz-key`) are to be injected. diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto index 64396f8380a4..078a5a4beef2 100644 --- a/api/envoy/api/v2/core/health_check.proto +++ b/api/envoy/api/v2/core/health_check.proto @@ -21,60 +21,40 @@ import "validate/validate.proto"; // * If health checking is configured for a cluster, additional statistics are emitted. They are // documented :ref:`here `. -message HealthCheck { - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // An optional jitter amount in milliseconds. 
If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + UNKNOWN = 0; - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; + // Healthy. + HEALTHY = 1; - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4; + // Unhealthy. + UNHEALTHY = 2; - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5; + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. + TIMEOUT = 4; - // Reuse health check connection between health checks. Default is true. 
- google.protobuf.BoolValue reuse_connection = 7; + // Degraded. + DEGRADED = 5; +} +message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { oneof payload { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string.min_bytes = 1]; + string text = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -90,7 +70,7 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. - string path = 2 [(validate.rules).string.min_bytes = 1]; + string path = 2 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -107,8 +87,8 @@ message HealthCheck { // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers // `. - repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated .max_items = 1000]; + repeated HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request that is sent to the // health checked cluster. @@ -120,7 +100,7 @@ message HealthCheck { // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. - repeated envoy.type.Int64Range expected_statuses = 9; + repeated type.Int64Range expected_statuses = 9; } message TcpHealthCheck { @@ -162,7 +142,7 @@ message HealthCheck { // Custom health check. message CustomHealthCheck { // The registered name of the custom health checker. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. @@ -173,6 +153,54 @@ message HealthCheck { } } + reserved 10; + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add interval_ms * + // interval_jitter_percent / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. + google.protobuf.UInt32Value unhealthy_threshold = 4; + + // The number of healthy health checks required before a host is marked + // healthy. 
Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + oneof health_checker { option (validate.required) = true; @@ -189,10 +217,6 @@ message HealthCheck { CustomHealthCheck custom_health_check = 13; } - reserved 10; // redis_health_check is deprecated by :ref:`custom_health_check - // ` - reserved "redis_health_check"; - // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to // date, without sending a potentially large amount of active health checking traffic for no @@ -201,14 +225,14 @@ message HealthCheck { // any other. // // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. 
For subsequent health checks @@ -216,14 +240,14 @@ message HealthCheck { // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. @@ -234,29 +258,3 @@ message HealthCheck { // The default value is false. bool always_log_health_check_failures = 19; } - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. 
- DEGRADED = 5; -} diff --git a/api/envoy/api/v2/core/http_uri.proto b/api/envoy/api/v2/core/http_uri.proto index debaa4155679..7e4b4dba43ce 100644 --- a/api/envoy/api/v2/core/http_uri.proto +++ b/api/envoy/api/v2/core/http_uri.proto @@ -22,7 +22,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string.min_bytes = 1]; + string uri = 1 [(validate.rules).string = {min_bytes: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -30,6 +30,7 @@ message HttpUri { // `_. oneof http_upstream_type { option (validate.required) = true; + // A cluster is created in the Envoy "cluster_manager" config // section. This field specifies the cluster name. // @@ -39,10 +40,12 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string.min_bytes = 1]; + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 - [(validate.rules).duration.gte = {}, (validate.rules).duration.required = true]; + google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; } diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index a318ba698b59..c45bb7adf7db 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -1,5 +1,3 @@ -// [#protodoc-title: Protocol options] - syntax = "proto3"; package envoy.api.v2.core; @@ -58,7 +56,7 @@ message Http2ProtocolOptions { // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. 
google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {gte: 1, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; // `Initial stream-level flow-control window // `_ size. Valid values range from 65535 @@ -67,17 +65,17 @@ message Http2ProtocolOptions { // // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default // window size now, so it's also the minimum. - + // // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Similar to *initial_stream_window_size*, but for connection-level flow-control // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Allows proxying Websocket and other upgrades over H2 connect. 
bool allow_connect = 5; diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto index 01982fbf6f95..15518902977a 100644 --- a/api/envoy/api/v2/eds.proto +++ b/api/envoy/api/v2/eds.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; import "envoy/api/v2/discovery.proto"; @@ -13,10 +12,10 @@ import "envoy/api/v2/endpoint/endpoint.proto"; import "envoy/type/percent.proto"; import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -import "google/protobuf/wrappers.proto"; -import "google/protobuf/duration.proto"; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` @@ -48,29 +47,18 @@ service EndpointDiscoveryService { // load_balancing_weight of its locality. First, a locality will be selected, // then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; - - // List of endpoints to load balance to. - repeated endpoint.LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - map named_endpoints = 5; - // Load balancing policy settings. message Policy { - reserved 1; - message DropOverload { // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string.min_bytes = 1]; + string category = 1 [(validate.rules).string = {min_bytes: 1}]; // Percentage of traffic that should be dropped for the category. 
- envoy.type.FractionalPercent drop_percentage = 2; + type.FractionalPercent drop_percentage = 2; } + + reserved 1; + // Action to trim the overall incoming traffic to protect the upstream // hosts. This action allows protection in case the hosts are unable to // recover from an outage, or unable to autoscale or unable to handle @@ -106,13 +94,13 @@ message ClusterLoadAssignment { // // Read more at :ref:`priority levels ` and // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; // The max time until which the endpoints from this assignment can be used. // If no new assignments are received before this time expires the endpoints // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; // The flag to disable overprovisioning. If it is set to true, // :ref:`overprovisioning factor @@ -126,6 +114,18 @@ message ClusterLoadAssignment { bool disable_overprovisioning = 5; } + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // List of endpoints to load balance to. + repeated endpoint.LocalityLbEndpoints endpoints = 2; + + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + map named_endpoints = 5; + // Load balancing policy settings. 
Policy policy = 4; } diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto index 7d614a26bb76..46875a173e83 100644 --- a/api/envoy/api/v2/endpoint/endpoint.proto +++ b/api/envoy/api/v2/endpoint/endpoint.proto @@ -18,6 +18,17 @@ import "validate/validate.proto"; // Upstream host identifier. message Endpoint { + // The optional health check configuration. + message HealthCheckConfig { + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + } + // The upstream host address. // // .. attention:: @@ -29,17 +40,6 @@ message Endpoint { // and will be resolved via DNS. core.Address address = 1; - // The optional health check configuration. - message HealthCheckConfig { - // Optional alternative health check port value. - // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. - uint32 port_value = 1 [(validate.rules).uint32.lte = 65535]; - } - // The optional health check configuration is used as configuration for the // health checker to contact the health checked host. // @@ -55,6 +55,7 @@ message LbEndpoint { // Upstream host identifier or a named reference. 
oneof host_identifier { Endpoint endpoint = 1; + string endpoint_name = 5; } diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto index aec4ad85aded..0ea940f6c476 100644 --- a/api/envoy/api/v2/lds.proto +++ b/api/envoy/api/v2/lds.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; import "envoy/api/v2/core/address.proto"; @@ -13,6 +12,7 @@ import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/discovery.proto"; import "envoy/api/v2/listener/listener.proto"; import "envoy/api/v2/listener/udp_listener_config.proto"; +import "envoy/config/listener/v2/api_listener.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; @@ -42,8 +42,36 @@ service ListenerDiscoveryService { } } -// [#comment:next free field: 19] +// [#comment:next free field: 20] message Listener { + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. 
+ // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + reserved 14; + // The unique name by which this listener is known. If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically // updated or removed via :ref:`LDS ` a unique name must be provided. @@ -52,7 +80,7 @@ message Listener { // The address that the listener should listen on. In general, the address must be unique, though // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on // Linux as the actual port will be allocated by the OS. - core.Address address = 2 [(validate.rules).message.required = true]; + core.Address address = 2 [(validate.rules).message = {required: true}]; // A list of filter chains to consider for this listener. The // :ref:`FilterChain ` with the most specific @@ -87,34 +115,9 @@ message Listener { // Listener metadata. core.Metadata metadata = 6; - // [#not-implemented-hide:] - message DeprecatedV1 { - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // [V2-API-DIFF] This is deprecated in v2, all Listeners will bind to their - // port. An additional filter chain must be created for every original - // destination port this listener may redirect to in v2, with the original - // port specified in the FilterChainMatch destination_port field. - // - // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] - google.protobuf.BoolValue bind_to_port = 1; - } - // [#not-implemented-hide:] DeprecatedV1 deprecated_v1 = 7; - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. 
- DEFAULT = 0; - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - // The type of draining to perform at a listener-wide level. DrainType drain_type = 8; @@ -188,8 +191,6 @@ message Listener { // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - reserved 14; - // Specifies the intended direction of the traffic relative to the local Envoy. core.TrafficDirection traffic_direction = 16; @@ -200,4 +201,17 @@ message Listener { // ` = "raw_udp_listener" for // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". listener.UdpListenerConfig udp_listener_config = 18; + + // [#not-implemented-hide:] + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] 
+ config.listener.v2.ApiListener api_listener = 19; } diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index f6dcecc70805..949075840ddf 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -6,10 +6,10 @@ option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; -import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; @@ -22,9 +22,11 @@ import "validate/validate.proto"; // Listener :ref:`configuration overview ` message Filter { + reserved 3; + // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. @@ -33,8 +35,6 @@ message Filter { google.protobuf.Any typed_config = 4; } - - reserved 3; } // Specifies the match criteria for selecting a specific filter chain for a @@ -66,9 +66,22 @@ message Filter { // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] message FilterChainMatch { + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + LOCAL = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + // Optional destination port to consider when use_original_dst is set on the // listener in determining a filter chain match. 
- google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {gte: 1, lte: 65535}]; + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; // If non-empty, an IP address and prefix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. @@ -82,17 +95,8 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - // Match a connection originating from the same host. - LOCAL = 1; - // Match a connection originating from a different host. - EXTERNAL = 2; - } - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum.defined_only = true]; + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; // The criteria is satisfied if the source IP address of the downstream // connection is contained in at least one of the specified subnets. If the @@ -103,7 +107,8 @@ message FilterChainMatch { // The criteria is satisfied if the source port of the downstream connection // is contained in at least one of the specified ports. If the parameter is // not specified, the source port is ignored. - repeated uint32 source_ports = 7 [(validate.rules).repeated .items.uint32 = {gte: 1, lte: 65535}]; + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining // a filter chain match. Those values will be compared against the server names of a new @@ -151,9 +156,6 @@ message FilterChainMatch { // and matching on values other than ``h2`` is going to lead to a lot of false negatives, // unless all connecting clients are known to use ALPN. 
repeated string application_protocols = 10; - - reserved 1; - reserved "sni_domains"; } // A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and @@ -194,7 +196,7 @@ message FilterChain { message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. diff --git a/api/envoy/api/v2/listener/quic_config.proto b/api/envoy/api/v2/listener/quic_config.proto index 95ffc3cdf319..1f67a4f7000a 100644 --- a/api/envoy/api/v2/listener/quic_config.proto +++ b/api/envoy/api/v2/listener/quic_config.proto @@ -2,11 +2,11 @@ syntax = "proto3"; package envoy.api.v2.listener; -option java_outer_classname = "ListenerProto"; +option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/api/v2/listener/udp_listener_config.proto b/api/envoy/api/v2/listener/udp_listener_config.proto index 28d8233f5ff0..4b489b99884c 100644 --- a/api/envoy/api/v2/listener/udp_listener_config.proto +++ b/api/envoy/api/v2/listener/udp_listener_config.proto @@ -6,10 +6,10 @@ option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; -import "google/protobuf/struct.proto"; import 
"google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; // [#protodoc-title: Udp Listener Config] // Listener :ref:`configuration overview ` diff --git a/api/envoy/api/v2/ratelimit/ratelimit.proto b/api/envoy/api/v2/ratelimit/ratelimit.proto index 6f4cd6258283..af910e3938ba 100644 --- a/api/envoy/api/v2/ratelimit/ratelimit.proto +++ b/api/envoy/api/v2/ratelimit/ratelimit.proto @@ -54,12 +54,12 @@ import "validate/validate.proto"; message RateLimitDescriptor { message Entry { // Descriptor key. - string key = 1 [(validate.rules).string.min_bytes = 1]; + string key = 1 [(validate.rules).string = {min_bytes: 1}]; // Descriptor value. - string value = 2 [(validate.rules).string.min_bytes = 1]; + string value = 2 [(validate.rules).string = {min_bytes: 1}]; } // Descriptor entries. - repeated Entry entries = 1 [(validate.rules).repeated .min_items = 1]; + repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index 9fabaf28af80..120c4bd4e32f 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -5,7 +5,6 @@ package envoy.api.v2; option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2"; - option java_generic_services = true; import "envoy/api/v2/core/base.proto"; @@ -90,7 +89,7 @@ message RouteConfiguration { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // that the connection manager encodes. @@ -103,7 +102,7 @@ message RouteConfiguration { // header value syntax, see the documentation on :ref:`custom request headers // `. 
repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // routed by the HTTP connection manager. @@ -128,5 +127,5 @@ message RouteConfiguration { // [#not-implemented-hide:] message Vhds { // Configuration source specifier for VHDS. - envoy.api.v2.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index 56321146cc64..1678c0ef9020 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -5,7 +5,6 @@ package envoy.api.v2.route; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v2.route"; -option java_generic_services = true; import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/regex.proto"; @@ -31,9 +30,24 @@ import "validate/validate.proto"; // upstream cluster to route to or whether to perform a redirect. // [#comment:next free field: 17] message VirtualHost { + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9; + // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -51,25 +65,12 @@ message VirtualHost { // The longest wildcards match first. // Only a single virtual host in the entire route configuration can match on ``*``. A domain // must be unique across all virtual hosts or the config will fail to load. - repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; + repeated string domains = 2 [(validate.rules).repeated = {min_items: 1}]; // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. repeated Route routes = 3; - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - // Specifies the type of TLS enforcement the virtual host expects. If this option is not // specified, there is no TLS requirement for the virtual host. TlsRequirementType require_tls = 4; @@ -89,7 +90,7 @@ message VirtualHost { // details on header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. @@ -102,7 +103,7 @@ message VirtualHost { // details on header value syntax, see the documentation on :ref:`custom request headers // `. 
repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. @@ -111,8 +112,6 @@ message VirtualHost { // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; - reserved 9; - // The per_filter_config field can be used to provide virtual host-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -157,11 +156,13 @@ message VirtualHost { // `. // [#comment:next free field: 15] message Route { + reserved 6; + // Name for the route. string name = 14; // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; oneof action { option (validate.required) = true; @@ -186,8 +187,6 @@ message Route { // Decorator for the matched route. Decorator decorator = 5; - reserved 6; - // The per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -209,7 +208,7 @@ message Route { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // matching this route. @@ -222,7 +221,7 @@ message Route { // details on header value syntax, see the documentation on // :ref:`custom request headers `. 
repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. @@ -242,9 +241,11 @@ message Route { // [#comment:next free field: 11] message WeightedCluster { message ClusterWeight { + reserved 7; + // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -267,7 +268,7 @@ message WeightedCluster { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. @@ -281,14 +282,12 @@ message WeightedCluster { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. repeated string response_headers_to_remove = 6; - reserved 7; - // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. 
Use of this field is filter @@ -305,11 +304,11 @@ message WeightedCluster { } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; // Specifies the total weight across all clusters. The sum of all cluster weights must equal this // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; // Specifies the runtime key prefix that should be used to construct the // runtime keys associated with each cluster. When the *runtime_key_prefix* is @@ -323,6 +322,11 @@ message WeightedCluster { } message RouteMatch { + message GrpcRouteMatchOptions { + } + + reserved 5; + oneof path_specifier { option (validate.required) = true; @@ -342,15 +346,15 @@ message RouteMatch { // // Examples: // - // * The regex */b[io]t* matches the path */bit* - // * The regex */b[io]t* matches the path */bot* - // * The regex */b[io]t* does not match the path */bite* - // * The regex */b[io]t* does not match the path */bit/bot* + // * The regex ``/b[io]t`` matches the path */bit* + // * The regex ``/b[io]t`` matches the path */bot* + // * The regex ``/b[io]t`` does not match the path */bite* + // * The regex ``/b[io]t`` does not match the path */bit/bot* // // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. - string regex = 3 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex = 3 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // If specified, the route is a regular expression rule meaning that the // regex must match the *:path* header once the query string is removed. 
The entire path @@ -364,15 +368,13 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] - type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message.required = true]; + type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; } // Indicates that prefix/path matching should be case insensitive. The default // is true. google.protobuf.BoolValue case_sensitive = 4; - reserved 5; - // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the @@ -405,9 +407,6 @@ message RouteMatch { // query string for a match to occur. repeated QueryParameterMatcher query_parameters = 7; - message GrpcRouteMatchOptions { - } - // If specified, only gRPC requests will be matched. The router will check // that the content-type header has a application/grpc or one of the various // application/grpc+ values. @@ -432,7 +431,7 @@ message CorsPolicy { // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for // use with untrusted input in all cases. repeated string allow_origin_regex = 8 - [(validate.rules).repeated .items.string.max_bytes = 1024, deprecated = true]; + [(validate.rules).repeated = {items {string {max_bytes: 1024}}}, deprecated = true]; // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. @@ -489,12 +488,172 @@ message CorsPolicy { // [#comment:next free field: 30] message RouteAction { + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. 
+ SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. If + // specified, Envoy will lookup the runtime key to get the % of requests to + // mirror. Valid values are from 0 to 10000, allowing for increments of + // 0.01% of requests to be mirrored. If the runtime key is specified in the + // configuration but not present in runtime, 0 is the default and thus 0% of + // requests will be mirrored. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`runtime_fraction + // ` field instead. + string runtime_key = 2 [deprecated = true]; + + // If both :ref:`runtime_key + // ` and this field are not + // specified, all requests to the target cluster will be mirrored. + // + // If specified, this field takes precedence over the `runtime_key` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. 
If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a :ref:`FractionalPercent ` proto represented + // as JSON/YAML and may also be represented as an integer with the assumption that the value + // is an integral percentage out of 100. For instance, a runtime key lookup returning the + // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is + // HUNDRED. This behaviour is different to that of the deprecated `runtime_key` field, + // where the implicit denominator is 10000. + core.RuntimeFractionalPercent runtime_fraction = 3; + } + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + message HashPolicy { + message Header { + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. 
+ message Cookie { + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + // Hash on source IP address. + bool source_ip = 1; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + } + + // The flag that shortcircuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:upgrade_configs` + // ` + // but does not affect any custom filter chain specified there. 
+ message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. + string upgrade_type = 1; + + // Determines if upgrades are available on this route. Defaults to true. + google.protobuf.BoolValue enabled = 2; + } + + reserved 12, 18, 19, 16, 22, 21; + oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -505,7 +664,7 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - string cluster_header = 2 [(validate.rules).string.min_bytes = 1]; + string cluster_header = 2 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -515,18 +674,10 @@ message RouteAction { WeightedCluster weighted_clusters = 3; } - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered @@ -625,55 +776,6 @@ message RouteAction { // (e.g.: policies are not merged, most internal one becomes the enforced policy). RetryPolicy retry_policy = 9; - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; - - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` field instead. - string runtime_key = 2 [deprecated = true]; - - // If both :ref:`runtime_key - // ` and this field are not - // specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. 
- // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a :ref:`FractionalPercent ` proto represented - // as JSON/YAML and may also be represented as an integer with the assumption that the value - // is an integral percentage out of 100. For instance, a runtime key lookup returning the - // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is - // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, - // where the implicit denominator is 10000. - core.RuntimeFractionalPercent runtime_fraction = 3; - } - // Indicates that the route has a request mirroring policy. RequestMirrorPolicy request_mirror_policy = 10; @@ -682,10 +784,6 @@ message RouteAction { // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] core.RoutingPriority priority = 11; - reserved 12; - reserved 18; - reserved 19; - // Specifies a set of rate limit configurations that could be applied to the // route. repeated RateLimit rate_limits = 13; @@ -696,85 +794,6 @@ message RouteAction { // request. google.protobuf.BoolValue include_vh_rate_limits = 14; - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - message HashPolicy { - message Header { - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 [(validate.rules).string.min_bytes = 1]; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. 
Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - // Hash on source IP address. - bool source_ip = 1; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - } - - // The flag that shortcircuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. 
- // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to // route the request. The method of combination is deterministic such that @@ -789,14 +808,9 @@ message RouteAction { // ignoring the rest of the hash policy list. repeated HashPolicy hash_policy = 15; - reserved 16; - reserved 22; - // Indicates that the route has a CORS policy. CorsPolicy cors = 17; - reserved 21; - // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of @@ -818,27 +832,8 @@ message RouteAction { // infinity). google.protobuf.Duration grpc_timeout_offset = 28; - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:upgrade_configs` - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1; - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - }; repeated UpgradeConfig upgrade_configs = 25; - // Configures :ref:`internal redirect ` behavior. 
- enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } InternalRedirectAction internal_redirect_action = 26; // Indicates that the route has a hedge policy. Note that if this is set, @@ -850,6 +845,43 @@ message RouteAction { // HTTP retry :ref:`architecture overview `. // [#comment:next free field: 10] message RetryPolicy { + message RetryPriority { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + // Specifies the conditions under which retry takes place. These are the same // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. @@ -873,29 +905,11 @@ message RetryPolicy { // would have been exhausted. 
google.protobuf.Duration per_try_timeout = 3; - message RetryPriority { - string name = 1 [(validate.rules).string.min_bytes = 1]; - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - // Specifies an implementation of a RetryPriority which is used to determine the // distribution of load across priorities used for retries. Refer to // :ref:`retry plugin configuration ` for more details. RetryPriority retry_priority = 4; - message RetryHostPredicate { - string name = 1 [(validate.rules).string.min_bytes = 1]; - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host // for retries. If any of the predicates reject the host, host selection will be reattempted. // Refer to :ref:`retry plugin configuration ` for more @@ -910,23 +924,6 @@ message RetryPolicy { // HTTP status codes that should trigger a retry in addition to those specified by retry_on. repeated uint32 retriable_status_codes = 7; - message RetryBackOff { - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration.gt = {seconds: 0}]; - } - // Specifies parameters that control retry back off. 
This parameter is optional, in which case the // default base interval is 25 milliseconds or, if set, the current value of the // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times @@ -946,13 +943,13 @@ message HedgePolicy { // Must be at least 1. // Defaults to 1. // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; // Specifies a probability that an additional upstream request should be sent // on top of what is specified by initial_requests. // Defaults to 0. // [#not-implemented-hide:] - envoy.type.FractionalPercent additional_request_chance = 2; + type.FractionalPercent additional_request_chance = 2; // Indicates that a hedged request should be sent when the per-try timeout // is hit. This will only occur if the retry policy also indicates that a @@ -966,6 +963,23 @@ message HedgePolicy { } message RedirectAction { + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + // When the scheme redirection take place, the following rules apply: // 1. If the source URI scheme is `http` and the port is explicitly // set to `:80`, the port will be removed after the redirection @@ -974,11 +988,14 @@ message RedirectAction { oneof scheme_rewrite_specifier { // The scheme portion of the URL will be swapped with "https". bool https_redirect = 4; + // The scheme portion of the URL will be swapped with this value. string scheme_redirect = 7; } + // The host portion of the URL will be swapped with this value. 
string host_redirect = 1; + // The port value of the URL will be swapped with this value. uint32 port_redirect = 8; @@ -997,26 +1014,9 @@ message RedirectAction { string prefix_rewrite = 5; } - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - // The HTTP status code to use in the redirect response. The default response // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum.defined_only = true]; + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; // Indicates that during redirection, the query portion of the URL will // be removed. Default value is false. @@ -1025,7 +1025,7 @@ message RedirectAction { message DirectResponseAction { // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}]; + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; // Specifies the content of the response body. If this setting is omitted, // no body is included in the generated response. @@ -1047,25 +1047,24 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string.min_bytes = 1]; + string operation = 1 [(validate.rules).string = {min_bytes: 1}]; } message Tracing { - // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. 
// Default: 100% - envoy.type.FractionalPercent client_sampling = 1; + type.FractionalPercent client_sampling = 1; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.FractionalPercent random_sampling = 2; + type.FractionalPercent random_sampling = 2; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random @@ -1075,7 +1074,7 @@ message Tracing { // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.FractionalPercent overall_sampling = 3; + type.FractionalPercent overall_sampling = 3; } // A virtual cluster is a way of specifying a regex matching rule against @@ -1102,14 +1101,14 @@ message VirtualCluster { // // Examples: // - // * The regex */rides/\d+* matches the path */rides/0* - // * The regex */rides/\d+* matches the path */rides/123* - // * The regex */rides/\d+* does not match the path */rides/123/456* + // * The regex ``/rides/\d+`` matches the path */rides/0* + // * The regex ``/rides/\d+`` matches the path */rides/123* + // * The regex ``/rides/\d+`` does not match the path */rides/123/456* // // .. attention:: // This field has been deprecated in favor of `headers` as it is not safe for use with // untrusted input in all cases. - string pattern = 1 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string pattern = 1 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // Specifies a list of header matchers to use for matching requests. Each specified header must // match. 
The pseudo-headers `:path` and `:method` can be used to match the request path and @@ -1119,7 +1118,7 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string.min_bytes = 1]; + string name = 2 [(validate.rules).string = {min_bytes: 1}]; // Optionally specifies the HTTP method to match on. For example GET, PUT, // etc. @@ -1131,18 +1130,6 @@ message VirtualCluster { // Global rate limiting :ref:`architecture overview `. message RateLimit { - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32.lte = 10]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - message Action { // The following descriptor entry is appended to the descriptor: // @@ -1183,10 +1170,10 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [(validate.rules).string.min_bytes = 1]; + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string.min_bytes = 1]; + string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1205,7 +1192,7 @@ message RateLimit { // ("generic_key", "") message GenericKey { // The value to use in the descriptor entry. 
- string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor: @@ -1215,7 +1202,7 @@ message RateLimit { // ("header_match", "") message HeaderValueMatch { // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1228,7 +1215,7 @@ message RateLimit { // specified headers in the config. A match will happen if all the // headers in the config are present in the request with the same values // (or based on presence if the value field is not in the config). - repeated HeaderMatcher headers = 3 [(validate.rules).repeated .min_items = 1]; + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } oneof action_specifier { @@ -1254,13 +1241,25 @@ message RateLimit { } } + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + // A list of actions that are to be applied for this rate limit configuration. // Order matters as the actions are processed sequentially and the descriptor // is composed by appending descriptor entries in that sequence. If an action // cannot append a descriptor entry, no descriptor is generated for the // configuration. See :ref:`composing actions // ` for additional documentation. 
- repeated Action actions = 3 [(validate.rules).repeated .min_items = 1]; + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; } // .. attention:: @@ -1288,14 +1287,10 @@ message RateLimit { // // [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] message HeaderMatcher { - // Specifies the name of the header in the request. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - reserved 2; // value deprecated by :ref:`exact_match - // ` + reserved 2, 3; - reserved 3; // regex deprecated by :ref:`regex_match - // ` + // Specifies the name of the header in the request. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1309,14 +1304,14 @@ message HeaderMatcher { // // Examples: // - // * The regex *\d{3}* matches the value *123* - // * The regex *\d{3}* does not match the value *1234* - // * The regex *\d{3}* does not match the value *123.456* + // * The regex ``\d{3}`` matches the value *123* + // * The regex ``\d{3}`` does not match the value *1234* + // * The regex ``\d{3}`` does not match the value *123.456* // // .. attention:: // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use // with untrusted input in all cases. - string regex_match = 5 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex_match = 5 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. 
The rule will not match if only a subsequence of the @@ -1334,7 +1329,7 @@ message HeaderMatcher { // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" - envoy.type.Int64Range range_match = 6; + type.Int64Range range_match = 6; // If specified, header match will be performed based on whether the header is in the // request. @@ -1346,7 +1341,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string.min_bytes = 1]; + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1354,14 +1349,14 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string.min_bytes = 1]; + string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. // // Examples: // - // * The regex *\d{3}* does not match the value *1234*, so it will match when inverted. + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. // * The range [-10,0) will match the value -1, so it will not match when inverted. bool invert_match = 8; } @@ -1371,7 +1366,7 @@ message HeaderMatcher { message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; // Specifies the value of the key. 
If the value is absent, a request // that contains the key in its query string will match, whether the @@ -1384,7 +1379,7 @@ message QueryParameterMatcher { // Specifies whether the query parameter value is a regular expression. // Defaults to false. The entire query parameter value (i.e., the part to // the right of the equals sign in "key=value") must match the regex. - // E.g., the regex "\d+$" will match "123" but not "a123" or "123a". + // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. // // ..attention:: // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. @@ -1392,7 +1387,7 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.StringMatcher string_match = 5 [(validate.rules).message.required = true]; + type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. 
bool present_match = 6; diff --git a/api/envoy/api/v2/srds.proto b/api/envoy/api/v2/srds.proto index a51426af01b7..0d74594cfe48 100644 --- a/api/envoy/api/v2/srds.proto +++ b/api/envoy/api/v2/srds.proto @@ -2,15 +2,17 @@ syntax = "proto3"; package envoy.api.v2; -import "envoy/api/v2/discovery.proto"; -import "google/api/annotations.proto"; -import "validate/validate.proto"; - option java_outer_classname = "SrdsProto"; -option java_package = "io.envoyproxy.envoy.api.v2"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v2"; option java_generic_services = true; +import "envoy/api/v2/discovery.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` // @@ -99,9 +101,6 @@ service ScopedRoutesDiscoveryService { // [#comment:next free field: 4] // [#proto-status: experimental] message ScopedRouteConfiguration { - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string.min_bytes = 1]; - // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` // specified in the HttpConnectionManager. The matching is done per HTTP @@ -120,14 +119,17 @@ message ScopedRouteConfiguration { // The ordered set of fragments to match against. The order must match the // fragments in the corresponding // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated .min_items = 1]; + repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated // with this scope. 
- string route_configuration_name = 2 [(validate.rules).string.min_bytes = 1]; + string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; // The key to match against. - Key key = 3 [(validate.rules).message.required = true]; + Key key = 3 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v3alpha/BUILD b/api/envoy/api/v3alpha/BUILD index e61a715ab9de..87f1f03a3b1d 100644 --- a/api/envoy/api/v3alpha/BUILD +++ b/api/envoy/api/v3alpha/BUILD @@ -27,6 +27,7 @@ api_proto_package( "//envoy/api/v3alpha/listener:pkg", "//envoy/api/v3alpha/ratelimit:pkg", "//envoy/api/v3alpha/route:pkg", + "//envoy/config/listener/v3alpha:pkg", "//envoy/type", ], ) @@ -34,6 +35,7 @@ api_proto_package( api_proto_library_internal( name = "discovery", srcs = ["discovery.proto"], + has_services = 1, visibility = [":friends"], deps = ["//envoy/api/v3alpha/core:base"], ) @@ -86,6 +88,7 @@ api_proto_library_internal( "//envoy/api/v3alpha/core:base", "//envoy/api/v3alpha/listener", "//envoy/api/v3alpha/listener:udp_listener_config", + "//envoy/config/listener/v3alpha:api_listener", ], ) diff --git a/api/envoy/api/v3alpha/auth/cert.proto b/api/envoy/api/v3alpha/auth/cert.proto index 83897b268320..39596e012704 100644 --- a/api/envoy/api/v3alpha/auth/cert.proto +++ b/api/envoy/api/v3alpha/auth/cert.proto @@ -36,11 +36,11 @@ message TlsParameters { } // Minimum TLS protocol version. By default, it's ``TLSv1_0``. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum.defined_only = true]; + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. 
- TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum.defined_only = true]; + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list // `_ @@ -106,7 +106,7 @@ message TlsParameters { message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. - string provider_name = 1 [(validate.rules).string.min_bytes = 1]; + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; // Private key method provider specific configuration. oneof config_type { @@ -167,7 +167,7 @@ message TlsSessionTicketKeys { // * Keep the session ticket keys at least as secure as your TLS certificate private keys // * Rotate session ticket keys at least daily, and preferably hourly // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 [(validate.rules).repeated .min_items = 1]; + repeated core.DataSource keys = 1 [(validate.rules).repeated = {min_items: 1}]; } message CertificateValidationContext { @@ -201,9 +201,9 @@ message CertificateValidationContext { // // .. code-block:: bash // - // $ openssl x509 -in path/to/client.crt -noout -pubkey \ - // | openssl pkey -pubin -outform DER \ - // | openssl dgst -sha256 -binary \ + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary // | openssl enc -base64 // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= // @@ -223,7 +223,7 @@ message CertificateValidationContext { // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. 
repeated string verify_certificate_spki = 3 - [(validate.rules).repeated .items.string = {min_bytes: 44, max_bytes: 44}]; + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. @@ -252,7 +252,7 @@ message CertificateValidationContext { // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 - [(validate.rules).repeated .items.string = {min_bytes: 64, max_bytes: 95}]; + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative Names. If specified, Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified values. @@ -283,6 +283,18 @@ message CertificateValidationContext { // TLS context shared by both client and server TLS contexts. message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + // TLS protocol versions, cipher suites etc. TlsParameters tls_params = 1; @@ -296,17 +308,7 @@ message CommonTlsContext { // Configs for fetching TLS certificates via SDS API. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated .max_items = 1]; - - message CombinedCertificateValidationContext { - // How to validate peer certificates. 
- CertificateValidationContext default_validation_context = 1 - [(validate.rules).message.required = true]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message.required = true]; - }; + [(validate.rules).repeated = {max_items: 1}]; oneof validation_context_type { // How to validate peer certificates. @@ -336,8 +338,6 @@ message CommonTlsContext { // // There is no default for this parameter. If empty, Envoy will not expose ALPN. repeated string alpn_protocols = 4; - - reserved 5; } message UpstreamTlsContext { @@ -345,7 +345,7 @@ message UpstreamTlsContext { CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string.max_bytes = 255]; + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; // If true, server-initiated TLS renegotiation will be allowed. // @@ -386,8 +386,10 @@ message DownstreamTlsContext { message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via SDS. - // When only name is specified, then secret will be loaded from static resources [V2-API-DIFF]. + // When only name is specified, then secret will be loaded from static + // resources. string name = 1; + core.ConfigSource sds_config = 2; } @@ -395,9 +397,12 @@ message SdsSecretConfig { message Secret { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
string name = 1; + oneof type { TlsCertificate tls_certificate = 2; + TlsSessionTicketKeys session_ticket_keys = 3; + CertificateValidationContext validation_context = 4; } } diff --git a/api/envoy/api/v3alpha/cds.proto b/api/envoy/api/v3alpha/cds.proto index 506224a7ba46..8d5f9faca51b 100644 --- a/api/envoy/api/v3alpha/cds.proto +++ b/api/envoy/api/v3alpha/cds.proto @@ -5,19 +5,18 @@ package envoy.api.v3alpha; option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha"; - option java_generic_services = true; -import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/cluster/circuit_breaker.proto"; +import "envoy/api/v3alpha/cluster/filter.proto"; +import "envoy/api/v3alpha/cluster/outlier_detection.proto"; +import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/core/config_source.proto"; -import "envoy/api/v3alpha/discovery.proto"; import "envoy/api/v3alpha/core/health_check.proto"; import "envoy/api/v3alpha/core/protocol.proto"; -import "envoy/api/v3alpha/cluster/circuit_breaker.proto"; -import "envoy/api/v3alpha/cluster/filter.proto"; -import "envoy/api/v3alpha/cluster/outlier_detection.proto"; +import "envoy/api/v3alpha/discovery.proto"; import "envoy/api/v3alpha/eds.proto"; import "envoy/type/percent.proto"; @@ -29,6 +28,8 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: Clusters] + // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { @@ -45,24 +46,9 @@ service ClusterDiscoveryService { } } -// [#protodoc-title: Clusters] - // Configuration for a single upstream cluster. 
-// [#comment:next free field: 41] +// [#comment:next free field: 44] message Cluster { - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // An optional alternative to the cluster name to be used while emitting stats. - // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - // confused with :ref:`Router Filter Header - // `. - string alt_stat_name = 28; - // Refer to :ref:`service discovery type ` // for an explanation on each type. enum DiscoveryType { @@ -90,45 +76,6 @@ message Cluster { ORIGINAL_DST = 4; } - // Extended cluster type. - message CustomClusterType { - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - google.protobuf.Any typed_config = 2; - } - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - // Configuration for the source of EDS updates for this Cluster. - core.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; - } - // Configuration to use for EDS updates for the Cluster. 
- EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration.gt = {}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - // Refer to :ref:`load balancer type ` architecture // overview section for information on each type. enum LbPolicy { @@ -170,111 +117,14 @@ message Cluster { // specific load balancer. Consult the configured cluster's documentation for whether to set // this option or not. CLUSTER_PROVIDED = 6; - } - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. - // - repeated core.Address hosts = 7; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes :ref:`hosts` field. - // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` - // once load_assignment is implemented.] - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // Setting this overrides :ref:`hosts` values. - // - ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. 
- repeated core.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; - - // Optional :ref:`circuit breaking ` for the cluster. - cluster.CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. If no TLS - // configuration is specified, TLS will not be used for new connections. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - auth.UpstreamTlsContext tls_context = 11; - - reserved 12; - - // Additional options when handling HTTP requests. These options will be applicable to both - // HTTP1 and HTTP2 requests. - core.HttpProtocolOptions common_http_protocol_options = 29; - - // Additional options when handling HTTP1 requests. - core.Http1ProtocolOptions http_protocol_options = 13; - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - core.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. 
- map extension_protocol_options = 35; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map typed_extension_protocol_options = 36; - - reserved 15; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. If this setting is not specified, the value defaults to 5000ms. For - // cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration.gt = {}]; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; + // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. + // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field + // and instead using the new load_balancing_policy field as the one and only mechanism for + // configuring this.] + LOAD_BALANCING_POLICY_CONFIG = 7; + } // When V4_ONLY is selected, the DNS resolver will only perform a lookup for // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will @@ -292,52 +142,57 @@ message Cluster { V6_ONLY = 2; } - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true]; + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). 
+ // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - repeated core.Address dns_resolvers = 18; + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - cluster.OutlierDetection outlier_detection = 19; + // TransportSocketMatch specifies what transport socket config will be used + // when the match conditions are satisfied. + message TransportSocketMatch { + // The name of the match, used in stats generation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Optional endpoint metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in *envoy.transport_socket* is used to match + // against the values specified in this field. + google.protobuf.Struct match = 2; + + // The configuration of the transport socket. + core.TransportSocket transport_socket = 3; + } - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. 
New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration.gt = {}]; + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.BindConfig upstream_bind_config = 21; + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + // Configuration for the source of EDS updates for this Cluster. + core.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. message LbSubsetConfig { - // If NO_FALLBACK is selected, a result // equivalent to no healthy hosts is reported. 
If ANY_ENDPOINT is selected, // any cluster endpoint may be returned (subject to policy, health checks, @@ -349,50 +204,54 @@ message Cluster { DEFAULT_SUBSET = 2; } - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - // Specifications for subsets. message LbSubsetSelector { - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum.defined_only = true]; - // Allows to override top level fallback policy per selector. enum LbSubsetSelectorFallbackPolicy { // If NOT_DEFINED top level config fallback policy is used instead. NOT_DEFINED = 0; + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. NO_FALLBACK = 1; + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned // (subject to policy, health checks, etc). ANY_ENDPOINT = 2; + // If DEFAULT_SUBSET is selected, load balancing is performed over the // endpoints matching the values from the default_subset field. DEFAULT_SUBSET = 3; } - } - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. 
code-block:: json + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + + // For each entry, LbEndpoint.Metadata's + // *envoy.lb* namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json // // { "subset_selectors": [ // { "keys": [ "version" ] }, @@ -436,45 +295,43 @@ message Cluster { bool list_as_any = 7; } - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - // Specific configuration for the LeastRequest load balancing policy. message LeastRequestLbConfig { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32.gte = 2]; + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; } // Specific configuration for the :ref:`RingHash` // load balancing policy. message RingHashLbConfig { - // Minimum hash ring size. 
The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608]; - - reserved 2; - // The hash function used to hash hosts onto the ketama ring. enum HashFunction { // Use `xxHash `_, this is the default hash function. XX_HASH = 0; + // Use `MurmurHash2 `_, this is compatible with // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled // on Linux and not macOS. MURMUR_HASH_2 = 1; } + reserved 2; + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; + // The hash function used to hash hosts onto the ketama ring. The value defaults to // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true]; + HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered // to further constrain resource use. See also // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608]; + google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; } // Specific configuration for the @@ -493,31 +350,8 @@ message Cluster { bool use_http_header = 1; } - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. 
Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - // Common configuration for all load balancer implementations. message CommonLbConfig { - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - envoy.type.Percent healthy_panic_threshold = 1; // Configuration for :ref:`zone aware routing // `. message ZoneAwareLbConfig { @@ -525,22 +359,41 @@ message Cluster { // if zone aware routing is configured. If not specified, the default is 100%. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. - envoy.type.Percent routing_enabled = 1; + type.Percent routing_enabled = 1; + // Configures minimum upstream cluster size required for zone aware routing // If upstream cluster size is less than specified, zone aware routing is not performed // even if zone aware routing is configured. If not specified, the default is 6. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. google.protobuf.UInt64Value min_cluster_size = 2; + + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. 
+ bool fail_traffic_on_panic = 3; } + // Configuration for :ref:`locality weighted load balancing // ` message LocalityWeightedLbConfig { } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. + type.Percent healthy_panic_threshold = 1; + oneof locality_config_specifier { ZoneAwareLbConfig zone_aware_lb_config = 2; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; } + // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when // the first update happens. This is useful for big clusters, with potentially noisy deploys @@ -583,6 +436,252 @@ message Cluster { bool close_connections_on_host_set_change = 6; } + reserved 12, 15; + + // Configuration to use different transport sockets for different endpoints. + // The entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata ` + // is used to match against the transport sockets as they appear in the list. The first + // :ref:`match ` is used. + // For example, with the following match + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: "rawbuffer" + // + // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. 
+ // + // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or + // *transport_socket* specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // *TransportSocketMatch* in this field. Other client Envoys receive CDS without + // *transport_socket_match* set, and still send plain text traffic to the same cluster. + // + // TODO(incfly): add a detailed architecture doc on intended usage. + // [#not-implemented-hide:] + repeated TransportSocketMatch transport_socket_matches = 43; + + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. 
+ CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // If the service discovery type is + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS`, + // then hosts is required. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`load_assignment` field instead. + // + repeated core.Address hosts = 7; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes :ref:`hosts` field. + // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` + // once load_assignment is implemented.] + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // Setting this overrides :ref:`hosts` values. + // + ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. 
If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + cluster.CircuitBreakers circuit_breakers = 10; + + // The TLS configuration for connections to the upstream cluster. If no TLS + // configuration is specified, TLS will not be used for new connections. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + auth.UpstreamTlsContext tls_context = 11; + + // Additional options when handling HTTP requests. These options will be applicable to both + // HTTP1 and HTTP2 requests. + core.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map extension_protocol_options = 35; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". 
See the extension's documentation for details on + // specific options. + map typed_extension_protocol_options = 36; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. If this setting is not specified, the value defaults to 5000ms. For + // cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration = {gt {}}]; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + repeated core.Address dns_resolvers = 18; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + cluster.OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. 
New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + // Common configuration for all load balancer implementations. CommonLbConfig common_lb_config = 27; @@ -596,20 +695,11 @@ message Cluster { // the Router filter, the filter name should be specified as *envoy.router*. 
core.Metadata metadata = 25; - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - // Determines how Envoy selects the protocol used to speak to upstream hosts. ClusterProtocolSelection protocol_selection = 26; // Optional options for upstream connections. - envoy.api.v3alpha.UpstreamConnectionOptions upstream_connection_options = 30; + UpstreamConnectionOptions upstream_connection_options = 30; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. @@ -638,6 +728,66 @@ message Cluster { // The chain will be applied to all outgoing connections that Envoy makes to the upstream // servers of this cluster. repeated cluster.Filter filters = 40; + + // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the + // :ref:`lb_policy` field has the value + // :ref:`LOAD_BALANCING_POLICY_CONFIG`. + LoadBalancingPolicy load_balancing_policy = 41; + + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. 
Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.ConfigSource lrs_server = 42; +} + +// [#not-implemented-hide:] Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. +// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +// +// [#proto-status: experimental] +message LoadBalancingPolicy { + message Policy { + // Required. The name of the LB policy. + string name = 1; + + // Optional config for the LB policy. + // No more than one of these two fields may be populated. 
+ google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. + repeated Policy policies = 1; } // An extensible structure containing the address Envoy should bind to when diff --git a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto index 0b6d2fe99217..277459f9dd5d 100644 --- a/api/envoy/api/v3alpha/cluster/circuit_breaker.proto +++ b/api/envoy/api/v3alpha/cluster/circuit_breaker.proto @@ -17,7 +17,6 @@ import "google/protobuf/wrappers.proto"; // :ref:`Circuit breaking` settings can be // specified individually for each defined priority. message CircuitBreakers { - // A Thresholds defines CircuitBreaker settings for a // :ref:`RoutingPriority`. message Thresholds { diff --git a/api/envoy/api/v3alpha/cluster/filter.proto b/api/envoy/api/v3alpha/cluster/filter.proto index 1bf3433ad29a..f1c5cc5d2737 100644 --- a/api/envoy/api/v3alpha/cluster/filter.proto +++ b/api/envoy/api/v3alpha/cluster/filter.proto @@ -18,7 +18,7 @@ import "validate/validate.proto"; message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
diff --git a/api/envoy/api/v3alpha/cluster/outlier_detection.proto b/api/envoy/api/v3alpha/cluster/outlier_detection.proto index 0954b85f2cc8..8e768803410d 100644 --- a/api/envoy/api/v3alpha/cluster/outlier_detection.proto +++ b/api/envoy/api/v3alpha/cluster/outlier_detection.proto @@ -26,26 +26,26 @@ message OutlierDetection { // The time interval between ejection analysis sweeps. This can result in // both new ejections as well as hosts being returned to service. Defaults // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. The real time is equal to the // base time multiplied by the number of times the host has been ejected. // Defaults to 30000ms or 30s. - google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; // The maximum % of an upstream cluster that can be ejected due to outlier // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through consecutive 5xx. This setting can be used to disable // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics. This setting can be used to // disable ejection or to ramp it up slowly. Defaults to 100. 
- google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32.lte = 100]; + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; // The number of hosts in a cluster that must have enough request volume to // detect success rate outliers. If the number of hosts is less than this @@ -77,7 +77,7 @@ message OutlierDetection { // is detected through consecutive gateway failures. This setting can be // used to disable ejection or to ramp it up slowly. Defaults to 0. google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // Determines whether to distinguish local origin failures from external errors. If set to true // the following configuration parameters are taken into account: @@ -101,7 +101,7 @@ message OutlierDetection { // :ref:`split_external_local_origin_errors` // is set to true. google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; // The % chance that a host will be actually ejected when an outlier status // is detected through success rate statistics for locally originated errors. @@ -110,5 +110,37 @@ message OutlierDetection { // :ref:`split_external_local_origin_errors` // is set to true. google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32.lte = 100]; + [(validate.rules).uint32 = {lte: 100}]; + + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given host is greater than or equal to this value, it will be + // ejected. Defaults to 85. + google.protobuf.UInt32Value failure_percentage_threshold = 16 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // failure percentage statistics. 
This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + // + // [#next-major-version: setting this without setting failure_percentage_threshold should be + // invalid in v4.] + google.protobuf.UInt32Value enforcing_failure_percentage = 17 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // local-origin failure percentage statistics. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 + [(validate.rules).uint32 = {lte: 100}]; + + // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. + // If the total number of hosts in the cluster is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. + google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; + + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this host. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + google.protobuf.UInt32Value failure_percentage_request_volume = 20; } diff --git a/api/envoy/api/v3alpha/core/address.proto b/api/envoy/api/v3alpha/core/address.proto index 80ab295b2bf7..8f72dab8788b 100644 --- a/api/envoy/api/v3alpha/core/address.proto +++ b/api/envoy/api/v3alpha/core/address.proto @@ -19,16 +19,19 @@ message Pipe { // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. 
- string path = 1 [(validate.rules).string.min_bytes = 1]; + string path = 1 [(validate.rules).string = {min_bytes: 1}]; } message SocketAddress { enum Protocol { TCP = 0; + // [#not-implemented-hide:] UDP = 1; } - Protocol protocol = 1 [(validate.rules).enum.defined_only = true]; + + Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; + // The address for this socket. :ref:`Listeners ` will bind // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: @@ -40,15 +43,19 @@ message SocketAddress { // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string.min_bytes = 1]; + string address = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof port_specifier { option (validate.required) = true; - uint32 port_value = 3 [(validate.rules).uint32.lte = 65535]; + + uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; + // This is only valid if :ref:`resolver_name // ` is specified below and the // named resolver is capable of named port resolution. string named_port = 4; } + // The name of the custom resolver. This must have been registered with Envoy. If // this is empty, a context dependent default applies. If the address is a concrete // IP address, no resolution will occur. If address is a hostname this @@ -68,10 +75,12 @@ message TcpKeepalive { // the connection is dead. Default is to use the OS level configuration (unless // overridden, Linux defaults to 9.) google.protobuf.UInt32Value keepalive_probes = 1; + // The number of seconds a connection needs to be idle before keep-alive probes // start being sent. Default is to use the OS level configuration (unless // overridden, Linux defaults to 7200s (ie 2 hours.) 
google.protobuf.UInt32Value keepalive_time = 2; + // The number of seconds between keep-alive probes. Default is to use the OS // level configuration (unless overridden, Linux defaults to 75s.) google.protobuf.UInt32Value keepalive_interval = 3; @@ -79,7 +88,7 @@ message TcpKeepalive { message BindConfig { // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message.required = true]; + SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; // Whether to set the *IP_FREEBIND* option when creating the socket. When this // flag is set to true, allows the :ref:`source_address @@ -103,6 +112,7 @@ message Address { option (validate.required) = true; SocketAddress socket_address = 1; + Pipe pipe = 2; } } @@ -111,7 +121,8 @@ message Address { // the subnet mask for a `CIDR `_ range. message CidrRange { // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + // Length of prefix, e.g. 0, 32. 
- google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32.lte = 128]; + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; } diff --git a/api/envoy/api/v3alpha/core/base.proto b/api/envoy/api/v3alpha/core/base.proto index a7b2f54d692f..abef2963644c 100644 --- a/api/envoy/api/v3alpha/core/base.proto +++ b/api/envoy/api/v3alpha/core/base.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.core"; import "envoy/api/v3alpha/core/http_uri.proto"; +import "envoy/type/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; @@ -14,10 +15,46 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -import "envoy/type/percent.proto"; - // [#protodoc-title: Common types] +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; + PATCH = 9; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. + INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} + // Identifies location of where either Envoy runs or where upstream hosts run. message Locality { // Region this :ref:`zone ` belongs to. 
@@ -110,52 +147,26 @@ message RuntimeUInt32 { uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string.min_bytes = 1]; -} - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. -enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; + string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } // Header name/value pair. message HeaderValue { // Header name. - string key = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 16384}]; + string key = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 16384}]; // Header value. // // The same :ref:`format specifier ` as used for // :ref:`HTTP access logging ` applies here, however // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [(validate.rules).string.max_bytes = 16384]; + string value = 2 [(validate.rules).string = {max_bytes: 16384}]; } // Header name/value pair plus option to control append behavior. message HeaderValueOption { // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message.required = true]; + HeaderValue header = 1 [(validate.rules).message = {required: true}]; // Should the value be appended? If true (default), the value is appended to // existing values. 
@@ -173,23 +184,23 @@ message DataSource { option (validate.required) = true; // Local filesystem data source. - string filename = 1 [(validate.rules).string.min_bytes = 1]; + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes.min_len = 1]; + bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string.min_bytes = 1]; + string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; } } // The message specifies how to fetch data from remote and how to verify it. message RemoteDataSource { // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message.required = true]; + HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string.min_bytes = 1]; + string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; } // Async data source which support async data fetch. @@ -212,7 +223,7 @@ message AsyncDataSource { message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. @@ -226,39 +237,47 @@ message TransportSocket { // Generic socket option message. This would be used to set socket options that // might not exist in upstream kernels or precompiled Envoy binaries. 
message SocketOption { + enum SocketState { + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + // An optional name to give this socket option for debugging, etc. // Uniqueness is not required and no special meaning is assumed. string description = 1; + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP int64 level = 2; + // The numeric name as passed to setsockopt int64 name = 3; + oneof value { option (validate.required) = true; // Because many sockopts take an int value. int64 int_value = 4; + // Otherwise it's a byte buffer. bytes buf_value = 5; } - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } + // The state in which the option will be applied. When used in BindConfig // STATE_PREBIND is currently the only valid value. - SocketState state = 6 [(validate.rules).enum.defined_only = true]; + SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } // Runtime derived FractionalPercent with defaults for when the numerator or denominator is not // specified via a runtime key. message RuntimeFractionalPercent { // Default value if the runtime value's for the numerator/denominator keys are not available. - envoy.type.FractionalPercent default_value = 1 [(validate.rules).message.required = true]; + type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; // Runtime key for a YAML representation of a FractionalPercent. 
string runtime_key = 2; @@ -271,15 +290,3 @@ message ControlPlane { // the Envoy is connected to. string identifier = 1; } - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} diff --git a/api/envoy/api/v3alpha/core/config_source.proto b/api/envoy/api/v3alpha/core/config_source.proto index 1c4510322e15..b3b4e39647c8 100644 --- a/api/envoy/api/v3alpha/core/config_source.proto +++ b/api/envoy/api/v3alpha/core/config_source.proto @@ -23,12 +23,15 @@ message ApiConfigSource { // Ideally this would be 'reserved 0' but one can't reserve the default // value. Instead we throw an exception if this is ever used. UNSUPPORTED_REST_LEGACY = 0 [deprecated = true]; + // REST-JSON v2 API. The `canonical JSON encoding // `_ for // the v2 protos is used. REST = 1; + // gRPC v2 API. GRPC = 2; + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. @@ -37,7 +40,9 @@ message ApiConfigSource { // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. DELTA_GRPC = 3; } - ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; + + ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; + // Cluster names should be used only with REST. If > 1 // cluster is defined, clusters will be cycled through if any kind of failure // occurs. @@ -56,7 +61,7 @@ message ApiConfigSource { google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. 
- google.protobuf.Duration request_timeout = 5 [(validate.rules).duration.gt.seconds = 0]; + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. @@ -72,6 +77,13 @@ message ApiConfigSource { message AggregatedConfigSource { } +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +message SelfConfigSource { +} + // Rate Limit settings to be applied for discovery requests made by Envoy. message RateLimitSettings { // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a @@ -80,7 +92,7 @@ message RateLimitSettings { // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double.gt = 0.0]; + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; } // Configuration for :ref:`listeners `, :ref:`clusters @@ -89,9 +101,11 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. +// [#comment:next free field: 6] message ConfigSource { oneof config_source_specifier { option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. // // .. note:: @@ -104,11 +118,26 @@ message ConfigSource { // are atomic. The same method of swapping files as is demonstrated in the // :ref:`runtime documentation ` can be used here also. string path = 1; + // API configuration source. ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. 
The ADS API configuration // source in the bootstrap configuration is used. AggregatedConfigSource ads = 3; + + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] + SelfConfigSource self = 5; } // When this timeout is specified, Envoy will wait no longer than the specified time for first diff --git a/api/envoy/api/v3alpha/core/grpc_service.proto b/api/envoy/api/v3alpha/core/grpc_service.proto index dd8b90d72cad..a4c2d66fc891 100644 --- a/api/envoy/api/v3alpha/core/grpc_service.proto +++ b/api/envoy/api/v3alpha/core/grpc_service.proto @@ -10,8 +10,8 @@ import "envoy/api/v3alpha/core/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; import "validate/validate.proto"; @@ -24,16 +24,11 @@ message GrpcService { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`tls_context // `. 
- string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#proto-status: draft] message GoogleGrpc { - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string.min_bytes = 1]; - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. message SslCredentials { // PEM encoded server root certificates. @@ -56,6 +51,7 @@ message GrpcService { message ChannelCredentials { oneof credential_specifier { option (validate.required) = true; + SslCredentials ssl_credentials = 1; // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 @@ -65,21 +61,22 @@ message GrpcService { } } - ChannelCredentials channel_credentials = 2; - message CallCredentials { message ServiceAccountJWTAccessCredentials { string json_key = 1; + uint64 token_lifetime_seconds = 2; } message GoogleIAMCredentials { string authorization_token = 1; + string authority_selector = 2; } message MetadataCredentialsFromPlugin { string name = 1; + oneof config_type { google.protobuf.Struct config = 2; @@ -117,6 +114,13 @@ message GrpcService { } } + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; + + ChannelCredentials channel_credentials = 2; + // A set of call credentials that can be composed with `channel credentials // `_. 
repeated CallCredentials call_credentials = 3; @@ -130,7 +134,7 @@ message GrpcService { // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel @@ -142,6 +146,8 @@ message GrpcService { google.protobuf.Struct config = 6; } + reserved 4; + oneof target_specifier { option (validate.required) = true; @@ -160,9 +166,6 @@ message GrpcService { // request. google.protobuf.Duration timeout = 3; - // Field 4 reserved due to moving credentials inside the GoogleGrpc message - reserved 4; - // Additional metadata to include in streams initiated to the GrpcService. // This can be used for scenarios in which additional ad hoc authorization // headers (e.g. `x-foo-bar: baz-key`) are to be injected. diff --git a/api/envoy/api/v3alpha/core/health_check.proto b/api/envoy/api/v3alpha/core/health_check.proto index 5000918a40c6..6d524f54b519 100644 --- a/api/envoy/api/v3alpha/core/health_check.proto +++ b/api/envoy/api/v3alpha/core/health_check.proto @@ -21,60 +21,40 @@ import "validate/validate.proto"; // * If health checking is configured for a cluster, additional statistics are emitted. They are // documented :ref:`here `. -message HealthCheck { - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // An optional jitter amount in milliseconds. 
If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + UNKNOWN = 0; - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; + // Healthy. + HEALTHY = 1; - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4; + // Unhealthy. + UNHEALTHY = 2; - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5; + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. + TIMEOUT = 4; - // Reuse health check connection between health checks. Default is true. 
- google.protobuf.BoolValue reuse_connection = 7; + // Degraded. + DEGRADED = 5; +} +message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { oneof payload { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string.min_bytes = 1]; + string text = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; @@ -90,7 +70,7 @@ message HealthCheck { // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. - string path = 2 [(validate.rules).string.min_bytes = 1]; + string path = 2 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; @@ -107,8 +87,8 @@ message HealthCheck { // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers // `. - repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated .max_items = 1000]; + repeated HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request that is sent to the // health checked cluster. @@ -120,7 +100,7 @@ message HealthCheck { // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. - repeated envoy.type.Int64Range expected_statuses = 9; + repeated type.Int64Range expected_statuses = 9; } message TcpHealthCheck { @@ -162,7 +142,7 @@ message HealthCheck { // Custom health check. message CustomHealthCheck { // The registered name of the custom health checker. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. @@ -173,6 +153,54 @@ message HealthCheck { } } + reserved 10; + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add interval_ms * + // interval_jitter_percent / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. + google.protobuf.UInt32Value unhealthy_threshold = 4; + + // The number of healthy health checks required before a host is marked + // healthy. 
Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + oneof health_checker { option (validate.required) = true; @@ -189,10 +217,6 @@ message HealthCheck { CustomHealthCheck custom_health_check = 13; } - reserved 10; // redis_health_check is deprecated by :ref:`custom_health_check - // ` - reserved "redis_health_check"; - // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to // date, without sending a potentially large amount of active health checking traffic for no @@ -201,14 +225,14 @@ message HealthCheck { // any other. // // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. 
For subsequent health checks @@ -216,14 +240,14 @@ message HealthCheck { // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. @@ -234,29 +258,3 @@ message HealthCheck { // The default value is false. bool always_log_health_check_failures = 19; } - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. 
- DEGRADED = 5; -} diff --git a/api/envoy/api/v3alpha/core/http_uri.proto b/api/envoy/api/v3alpha/core/http_uri.proto index 9c8e85b44150..e07e99dbb07f 100644 --- a/api/envoy/api/v3alpha/core/http_uri.proto +++ b/api/envoy/api/v3alpha/core/http_uri.proto @@ -22,7 +22,7 @@ message HttpUri { // // uri: https://www.googleapis.com/oauth2/v1/certs // - string uri = 1 [(validate.rules).string.min_bytes = 1]; + string uri = 1 [(validate.rules).string = {min_bytes: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or @@ -30,6 +30,7 @@ message HttpUri { // `_. oneof http_upstream_type { option (validate.required) = true; + // A cluster is created in the Envoy "cluster_manager" config // section. This field specifies the cluster name. // @@ -39,10 +40,12 @@ message HttpUri { // // cluster: jwks_cluster // - string cluster = 2 [(validate.rules).string.min_bytes = 1]; + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 - [(validate.rules).duration.gte = {}, (validate.rules).duration.required = true]; + google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; } diff --git a/api/envoy/api/v3alpha/core/protocol.proto b/api/envoy/api/v3alpha/core/protocol.proto index b0b29e630eb1..e97612928ca4 100644 --- a/api/envoy/api/v3alpha/core/protocol.proto +++ b/api/envoy/api/v3alpha/core/protocol.proto @@ -1,5 +1,3 @@ -// [#protodoc-title: Protocol options] - syntax = "proto3"; package envoy.api.v3alpha.core; @@ -58,7 +56,7 @@ message Http2ProtocolOptions { // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. 
google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {gte: 1, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; // `Initial stream-level flow-control window // `_ size. Valid values range from 65535 @@ -67,17 +65,17 @@ message Http2ProtocolOptions { // // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default // window size now, so it's also the minimum. - + // // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Similar to *initial_stream_window_size*, but for connection-level flow-control // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Allows proxying Websocket and other upgrades over H2 connect. 
bool allow_connect = 5; diff --git a/api/envoy/api/v3alpha/eds.proto b/api/envoy/api/v3alpha/eds.proto index da149db20bd7..76400eee4642 100644 --- a/api/envoy/api/v3alpha/eds.proto +++ b/api/envoy/api/v3alpha/eds.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha"; - option java_generic_services = true; import "envoy/api/v3alpha/discovery.proto"; @@ -13,10 +12,10 @@ import "envoy/api/v3alpha/endpoint/endpoint.proto"; import "envoy/type/percent.proto"; import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -import "google/protobuf/wrappers.proto"; -import "google/protobuf/duration.proto"; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` @@ -48,29 +47,18 @@ service EndpointDiscoveryService { // load_balancing_weight of its locality. First, a locality will be selected, // then an endpoint within that locality will be chose based on its weight. message ClusterLoadAssignment { - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; - - // List of endpoints to load balance to. - repeated endpoint.LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - map named_endpoints = 5; - // Load balancing policy settings. message Policy { - reserved 1; - message DropOverload { // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string.min_bytes = 1]; + string category = 1 [(validate.rules).string = {min_bytes: 1}]; // Percentage of traffic that should be dropped for the category. 
- envoy.type.FractionalPercent drop_percentage = 2; + type.FractionalPercent drop_percentage = 2; } + + reserved 1; + // Action to trim the overall incoming traffic to protect the upstream // hosts. This action allows protection in case the hosts are unable to // recover from an outage, or unable to autoscale or unable to handle @@ -106,13 +94,13 @@ message ClusterLoadAssignment { // // Read more at :ref:`priority levels ` and // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; // The max time until which the endpoints from this assignment can be used. // If no new assignments are received before this time expires the endpoints // are considered stale and should be marked unhealthy. // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration.gt.seconds = 0]; + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; // The flag to disable overprovisioning. If it is set to true, // :ref:`overprovisioning factor @@ -126,6 +114,18 @@ message ClusterLoadAssignment { bool disable_overprovisioning = 5; } + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // List of endpoints to load balance to. + repeated endpoint.LocalityLbEndpoints endpoints = 2; + + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + map named_endpoints = 5; + // Load balancing policy settings. 
Policy policy = 4; } diff --git a/api/envoy/api/v3alpha/endpoint/endpoint.proto b/api/envoy/api/v3alpha/endpoint/endpoint.proto index 62c3060dee0b..43eacd20b9d7 100644 --- a/api/envoy/api/v3alpha/endpoint/endpoint.proto +++ b/api/envoy/api/v3alpha/endpoint/endpoint.proto @@ -18,6 +18,17 @@ import "validate/validate.proto"; // Upstream host identifier. message Endpoint { + // The optional health check configuration. + message HealthCheckConfig { + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + } + // The upstream host address. // // .. attention:: @@ -29,17 +40,6 @@ message Endpoint { // and will be resolved via DNS. core.Address address = 1; - // The optional health check configuration. - message HealthCheckConfig { - // Optional alternative health check port value. - // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. - uint32 port_value = 1 [(validate.rules).uint32.lte = 65535]; - } - // The optional health check configuration is used as configuration for the // health checker to contact the health checked host. // @@ -55,6 +55,7 @@ message LbEndpoint { // Upstream host identifier or a named reference. 
oneof host_identifier { Endpoint endpoint = 1; + string endpoint_name = 5; } diff --git a/api/envoy/api/v3alpha/lds.proto b/api/envoy/api/v3alpha/lds.proto index 794b25a4a395..cfc196d6fbc0 100644 --- a/api/envoy/api/v3alpha/lds.proto +++ b/api/envoy/api/v3alpha/lds.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha; option java_outer_classname = "LdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha"; - option java_generic_services = true; import "envoy/api/v3alpha/core/address.proto"; @@ -13,6 +12,7 @@ import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/discovery.proto"; import "envoy/api/v3alpha/listener/listener.proto"; import "envoy/api/v3alpha/listener/udp_listener_config.proto"; +import "envoy/config/listener/v3alpha/api_listener.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; @@ -42,8 +42,36 @@ service ListenerDiscoveryService { } } -// [#comment:next free field: 19] +// [#comment:next free field: 20] message Listener { + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. 
An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + reserved 14; + // The unique name by which this listener is known. If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically // updated or removed via :ref:`LDS ` a unique name must be provided. @@ -52,7 +80,7 @@ message Listener { // The address that the listener should listen on. In general, the address must be unique, though // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on // Linux as the actual port will be allocated by the OS. - core.Address address = 2 [(validate.rules).message.required = true]; + core.Address address = 2 [(validate.rules).message = {required: true}]; // A list of filter chains to consider for this listener. The // :ref:`FilterChain ` with the most specific @@ -87,34 +115,9 @@ message Listener { // Listener metadata. core.Metadata metadata = 6; - // [#not-implemented-hide:] - message DeprecatedV1 { - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // [V2-API-DIFF] This is deprecated in v2, all Listeners will bind to their - // port. An additional filter chain must be created for every original - // destination port this listener may redirect to in v2, with the original - // port specified in the FilterChainMatch destination_port field. - // - // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] 
- google.protobuf.BoolValue bind_to_port = 1; - } - // [#not-implemented-hide:] DeprecatedV1 deprecated_v1 = 7; - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - // The type of draining to perform at a listener-wide level. DrainType drain_type = 8; @@ -188,8 +191,6 @@ message Listener { // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - reserved 14; - // Specifies the intended direction of the traffic relative to the local Envoy. core.TrafficDirection traffic_direction = 16; @@ -200,4 +201,17 @@ message Listener { // ` = "raw_udp_listener" for // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". listener.UdpListenerConfig udp_listener_config = 18; + + // [#not-implemented-hide:] + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. 
That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + config.listener.v3alpha.ApiListener api_listener = 19; } diff --git a/api/envoy/api/v3alpha/listener/BUILD b/api/envoy/api/v3alpha/listener/BUILD index 3ee071ca5c03..4a4836db9412 100644 --- a/api/envoy/api/v3alpha/listener/BUILD +++ b/api/envoy/api/v3alpha/listener/BUILD @@ -28,3 +28,9 @@ api_proto_library_internal( "//envoy/api/v3alpha/core:base", ], ) + +api_proto_library_internal( + name = "quic_config", + srcs = ["quic_config.proto"], + visibility = ["//envoy/api/v3alpha:friends"], +) diff --git a/api/envoy/api/v3alpha/listener/listener.proto b/api/envoy/api/v3alpha/listener/listener.proto index ef5063969472..a00b535c33bc 100644 --- a/api/envoy/api/v3alpha/listener/listener.proto +++ b/api/envoy/api/v3alpha/listener/listener.proto @@ -6,10 +6,10 @@ option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; -import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/core/base.proto"; import "google/protobuf/any.proto"; @@ -22,9 +22,11 @@ import "validate/validate.proto"; // Listener :ref:`configuration overview ` message Filter { + reserved 3; + // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
@@ -33,8 +35,6 @@ message Filter { google.protobuf.Any typed_config = 4; } - - reserved 3; } // Specifies the match criteria for selecting a specific filter chain for a @@ -66,9 +66,22 @@ message Filter { // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] message FilterChainMatch { + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + LOCAL = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + // Optional destination port to consider when use_original_dst is set on the // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {gte: 1, lte: 65535}]; + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; // If non-empty, an IP address and prefix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. @@ -82,17 +95,8 @@ message FilterChainMatch { // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - // Match a connection originating from the same host. - LOCAL = 1; - // Match a connection originating from a different host. - EXTERNAL = 2; - } - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum.defined_only = true]; + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; // The criteria is satisfied if the source IP address of the downstream // connection is contained in at least one of the specified subnets. If the @@ -103,7 +107,8 @@ message FilterChainMatch { // The criteria is satisfied if the source port of the downstream connection // is contained in at least one of the specified ports. 
If the parameter is // not specified, the source port is ignored. - repeated uint32 source_ports = 7 [(validate.rules).repeated .items.uint32 = {gte: 1, lte: 65535}]; + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining // a filter chain match. Those values will be compared against the server names of a new @@ -151,9 +156,6 @@ message FilterChainMatch { // and matching on values other than ``h2`` is going to lead to a lot of false negatives, // unless all connecting clients are known to use ALPN. repeated string application_protocols = 10; - - reserved 1; - reserved "sni_domains"; } // A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and @@ -194,7 +196,7 @@ message FilterChain { message ListenerFilter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. diff --git a/api/envoy/api/v3alpha/listener/quic_config.proto b/api/envoy/api/v3alpha/listener/quic_config.proto new file mode 100644 index 000000000000..9c5a1af6450c --- /dev/null +++ b/api/envoy/api/v3alpha/listener/quic_config.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.api.v3alpha.listener; + +option java_outer_classname = "QuicConfigProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +// Configuration specific to the QUIC protocol. 
+// Next id: 4 +message QuicProtocolOptions { + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + google.protobuf.UInt32Value max_concurrent_streams = 1; + + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. 300000ms if not specified. + google.protobuf.Duration idle_timeout = 2; + + // Connection timeout in milliseconds before the crypto handshake is finished. + // 20000ms if not specified. + google.protobuf.Duration crypto_handshake_timeout = 3; +} diff --git a/api/envoy/api/v3alpha/listener/udp_listener_config.proto b/api/envoy/api/v3alpha/listener/udp_listener_config.proto index 532028da9f73..3693e4b69160 100644 --- a/api/envoy/api/v3alpha/listener/udp_listener_config.proto +++ b/api/envoy/api/v3alpha/listener/udp_listener_config.proto @@ -6,10 +6,10 @@ option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy::Api::V2::ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; -import "google/protobuf/struct.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; // [#protodoc-title: Udp Listener Config] // Listener :ref:`configuration overview ` diff --git a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto index 9f2b67818a02..18f559778f19 100644 --- a/api/envoy/api/v3alpha/ratelimit/ratelimit.proto +++ b/api/envoy/api/v3alpha/ratelimit/ratelimit.proto @@ -54,12 +54,12 @@ import "validate/validate.proto"; message RateLimitDescriptor { message Entry { // Descriptor key. - string key = 1 [(validate.rules).string.min_bytes = 1]; + string key = 1 [(validate.rules).string = {min_bytes: 1}]; // Descriptor value. 
- string value = 2 [(validate.rules).string.min_bytes = 1]; + string value = 2 [(validate.rules).string = {min_bytes: 1}]; } // Descriptor entries. - repeated Entry entries = 1 [(validate.rules).repeated .min_items = 1]; + repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/api/v3alpha/rds.proto b/api/envoy/api/v3alpha/rds.proto index 36dfbc4a0fee..54b1086e8831 100644 --- a/api/envoy/api/v3alpha/rds.proto +++ b/api/envoy/api/v3alpha/rds.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha; option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha"; - option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; @@ -90,7 +89,7 @@ message RouteConfiguration { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // that the connection manager encodes. @@ -103,7 +102,7 @@ message RouteConfiguration { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // routed by the HTTP connection manager. @@ -128,5 +127,5 @@ message RouteConfiguration { // [#not-implemented-hide:] message Vhds { // Configuration source specifier for VHDS. 
- envoy.api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/api/v3alpha/route/route.proto b/api/envoy/api/v3alpha/route/route.proto index d4d60e929022..98eb78202d2d 100644 --- a/api/envoy/api/v3alpha/route/route.proto +++ b/api/envoy/api/v3alpha/route/route.proto @@ -5,7 +5,6 @@ package envoy.api.v3alpha.route; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.api.v3alpha.route"; -option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; import "envoy/type/matcher/regex.proto"; @@ -31,9 +30,24 @@ import "validate/validate.proto"; // upstream cluster to route to or whether to perform a redirect. // [#comment:next free field: 17] message VirtualHost { + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9; + // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. @@ -51,25 +65,12 @@ message VirtualHost { // The longest wildcards match first. // Only a single virtual host in the entire route configuration can match on ``*``. A domain // must be unique across all virtual hosts or the config will fail to load. 
- repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; + repeated string domains = 2 [(validate.rules).repeated = {min_items: 1}]; // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. repeated Route routes = 3; - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - // Specifies the type of TLS enforcement the virtual host expects. If this option is not // specified, there is no TLS requirement for the virtual host. TlsRequirementType require_tls = 4; @@ -89,7 +90,7 @@ message VirtualHost { // details on header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. @@ -102,7 +103,7 @@ message VirtualHost { // details on header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. @@ -111,8 +112,6 @@ message VirtualHost { // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; - reserved 9; - // The per_filter_config field can be used to provide virtual host-specific // configurations for filters. 
The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -157,11 +156,13 @@ message VirtualHost { // `. // [#comment:next free field: 15] message Route { + reserved 6; + // Name for the route. string name = 14; // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; oneof action { option (validate.required) = true; @@ -186,8 +187,6 @@ message Route { // Decorator for the matched route. Decorator decorator = 5; - reserved 6; - // The per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -209,7 +208,7 @@ message Route { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // matching this route. @@ -222,7 +221,7 @@ message Route { // details on header value syntax, see the documentation on // :ref:`custom request headers `. repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. @@ -242,9 +241,11 @@ message Route { // [#comment:next free field: 11] message WeightedCluster { message ClusterWeight { + reserved 7; + // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -267,7 +268,7 @@ message WeightedCluster { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. @@ -281,14 +282,12 @@ message WeightedCluster { // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated .max_items = 1000]; + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. repeated string response_headers_to_remove = 6; - reserved 7; - // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter @@ -305,11 +304,11 @@ message WeightedCluster { } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; // Specifies the total weight across all clusters. The sum of all cluster weights must equal this // value, which must be greater than 0. Defaults to 100. 
- google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; // Specifies the runtime key prefix that should be used to construct the // runtime keys associated with each cluster. When the *runtime_key_prefix* is @@ -323,6 +322,11 @@ message WeightedCluster { } message RouteMatch { + message GrpcRouteMatchOptions { + } + + reserved 5; + oneof path_specifier { option (validate.required) = true; @@ -342,15 +346,15 @@ message RouteMatch { // // Examples: // - // * The regex */b[io]t* matches the path */bit* - // * The regex */b[io]t* matches the path */bot* - // * The regex */b[io]t* does not match the path */bite* - // * The regex */b[io]t* does not match the path */bit/bot* + // * The regex ``/b[io]t`` matches the path */bit* + // * The regex ``/b[io]t`` matches the path */bot* + // * The regex ``/b[io]t`` does not match the path */bite* + // * The regex ``/b[io]t`` does not match the path */bit/bot* // // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. - string regex = 3 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex = 3 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // If specified, the route is a regular expression rule meaning that the // regex must match the *:path* header once the query string is removed. The entire path @@ -364,15 +368,13 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] 
- type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message.required = true]; + type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; } // Indicates that prefix/path matching should be case insensitive. The default // is true. google.protobuf.BoolValue case_sensitive = 4; - reserved 5; - // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the @@ -405,9 +407,6 @@ message RouteMatch { // query string for a match to occur. repeated QueryParameterMatcher query_parameters = 7; - message GrpcRouteMatchOptions { - } - // If specified, only gRPC requests will be matched. The router will check // that the content-type header has a application/grpc or one of the various // application/grpc+ values. @@ -432,7 +431,7 @@ message CorsPolicy { // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for // use with untrusted input in all cases. repeated string allow_origin_regex = 8 - [(validate.rules).repeated .items.string.max_bytes = 1024, deprecated = true]; + [(validate.rules).repeated = {items {string {max_bytes: 1024}}}, deprecated = true]; // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. @@ -489,12 +488,172 @@ message CorsPolicy { // [#comment:next free field: 30] message RouteAction { + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. 
The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. If + // specified, Envoy will lookup the runtime key to get the % of requests to + // mirror. Valid values are from 0 to 10000, allowing for increments of + // 0.01% of requests to be mirrored. If the runtime key is specified in the + // configuration but not present in runtime, 0 is the default and thus 0% of + // requests will be mirrored. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`runtime_fraction + // ` field instead. + string runtime_key = 2 [deprecated = true]; + + // If both :ref:`runtime_key + // ` and this field are not + // specified, all requests to the target cluster will be mirrored. + // + // If specified, this field takes precedence over the `runtime_key` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + // + // .. 
note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a :ref:`FractionalPercent ` proto represented + // as JSON/YAML and may also be represented as an integer with the assumption that the value + // is an integral percentage out of 100. For instance, a runtime key lookup returning the + // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is + // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, + // where the implicit denominator is 10000. + core.RuntimeFractionalPercent runtime_fraction = 3; + } + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + message HashPolicy { + message Header { + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. 
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + // Hash on source IP address. + bool source_ip = 1; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + } + + // The flag that shortcircuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:upgrade_configs` + // ` + // but does not affect any custom filter chain specified there. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. 
+ string upgrade_type = 1; + + // Determines if upgrades are available on this route. Defaults to true. + google.protobuf.BoolValue enabled = 2; + } + + reserved 12, 18, 19, 16, 22, 21; + oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -505,7 +664,7 @@ message RouteAction { // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - string cluster_header = 2 [(validate.rules).string.min_bytes = 1]; + string cluster_header = 2 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -515,18 +674,10 @@ message RouteAction { WeightedCluster weighted_clusters = 3; } - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered @@ -625,55 +776,6 @@ message RouteAction { // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
RetryPolicy retry_policy = 9; - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; - - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` field instead. - string runtime_key = 2 [deprecated = true]; - - // If both :ref:`runtime_key - // ` and this field are not - // specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - // - // .. 
note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a :ref:`FractionalPercent ` proto represented - // as JSON/YAML and may also be represented as an integer with the assumption that the value - // is an integral percentage out of 100. For instance, a runtime key lookup returning the - // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is - // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, - // where the implicit denominator is 10000. - core.RuntimeFractionalPercent runtime_fraction = 3; - } - // Indicates that the route has a request mirroring policy. RequestMirrorPolicy request_mirror_policy = 10; @@ -682,10 +784,6 @@ message RouteAction { // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] core.RoutingPriority priority = 11; - reserved 12; - reserved 18; - reserved 19; - // Specifies a set of rate limit configurations that could be applied to the // route. repeated RateLimit rate_limits = 13; @@ -696,85 +794,6 @@ message RouteAction { // request. google.protobuf.BoolValue include_vh_rate_limits = 14; - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - message HashPolicy { - message Header { - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 [(validate.rules).string.min_bytes = 1]; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. 
The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - // Hash on source IP address. - bool source_ip = 1; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - } - - // The flag that shortcircuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. 
- bool terminal = 4; - } - // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to // route the request. The method of combination is deterministic such that @@ -789,14 +808,9 @@ message RouteAction { // ignoring the rest of the hash policy list. repeated HashPolicy hash_policy = 15; - reserved 16; - reserved 22; - // Indicates that the route has a CORS policy. CorsPolicy cors = 17; - reserved 21; - // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of @@ -818,27 +832,8 @@ message RouteAction { // infinity). google.protobuf.Duration grpc_timeout_offset = 28; - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:upgrade_configs` - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1; - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - }; repeated UpgradeConfig upgrade_configs = 25; - // Configures :ref:`internal redirect ` behavior. - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } InternalRedirectAction internal_redirect_action = 26; // Indicates that the route has a hedge policy. Note that if this is set, @@ -848,8 +843,45 @@ message RouteAction { } // HTTP retry :ref:`architecture overview `. 
-// [#comment:next free field: 9] +// [#comment:next free field: 10] message RetryPolicy { + message RetryPriority { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + // Specifies the conditions under which retry takes place. These are the same // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. @@ -873,29 +905,11 @@ message RetryPolicy { // would have been exhausted. google.protobuf.Duration per_try_timeout = 3; - message RetryPriority { - string name = 1 [(validate.rules).string.min_bytes = 1]; - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - // Specifies an implementation of a RetryPriority which is used to determine the // distribution of load across priorities used for retries. 
Refer to // :ref:`retry plugin configuration ` for more details. RetryPriority retry_priority = 4; - message RetryHostPredicate { - string name = 1 [(validate.rules).string.min_bytes = 1]; - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host // for retries. If any of the predicates reject the host, host selection will be reattempted. // Refer to :ref:`retry plugin configuration ` for more @@ -910,29 +924,17 @@ message RetryPolicy { // HTTP status codes that should trigger a retry in addition to those specified by retry_on. repeated uint32 retriable_status_codes = 7; - message RetryBackOff { - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true, - gt: {seconds: 0} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration.gt = {seconds: 0}]; - } - // Specifies parameters that control retry back off. This parameter is optional, in which case the // default base interval is 25 milliseconds or, if set, the current value of the // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` // describes Envoy's back-off algorithm. 
RetryBackOff retry_back_off = 8; + + // HTTP headers that trigger a retry if present in the response. A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + repeated HeaderMatcher retriable_headers = 9; } // HTTP request hedging :ref:`architecture overview `. @@ -941,13 +943,13 @@ message HedgePolicy { // Must be at least 1. // Defaults to 1. // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; // Specifies a probability that an additional upstream request should be sent // on top of what is specified by initial_requests. // Defaults to 0. // [#not-implemented-hide:] - envoy.type.FractionalPercent additional_request_chance = 2; + type.FractionalPercent additional_request_chance = 2; // Indicates that a hedged request should be sent when the per-try timeout // is hit. This will only occur if the retry policy also indicates that a @@ -961,6 +963,23 @@ message HedgePolicy { } message RedirectAction { + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + // When the scheme redirection take place, the following rules apply: // 1. If the source URI scheme is `http` and the port is explicitly // set to `:80`, the port will be removed after the redirection @@ -969,11 +988,14 @@ message RedirectAction { oneof scheme_rewrite_specifier { // The scheme portion of the URL will be swapped with "https". bool https_redirect = 4; + // The scheme portion of the URL will be swapped with this value. 
string scheme_redirect = 7; } + // The host portion of the URL will be swapped with this value. string host_redirect = 1; + // The port value of the URL will be swapped with this value. uint32 port_redirect = 8; @@ -992,26 +1014,9 @@ message RedirectAction { string prefix_rewrite = 5; } - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - // The HTTP status code to use in the redirect response. The default response // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum.defined_only = true]; + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; // Indicates that during redirection, the query portion of the URL will // be removed. Default value is false. @@ -1020,7 +1025,7 @@ message RedirectAction { message DirectResponseAction { // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}]; + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; // Specifies the content of the response body. If this setting is omitted, // no body is included in the generated response. @@ -1042,25 +1047,24 @@ message Decorator { // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. - string operation = 1 [(validate.rules).string.min_bytes = 1]; + string operation = 1 [(validate.rules).string = {min_bytes: 1}]; } message Tracing { - // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. 
This field is a direct analog for the runtime variable // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% - envoy.type.FractionalPercent client_sampling = 1; + type.FractionalPercent client_sampling = 1; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.FractionalPercent random_sampling = 2; + type.FractionalPercent random_sampling = 2; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random @@ -1070,7 +1074,7 @@ message Tracing { // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.FractionalPercent overall_sampling = 3; + type.FractionalPercent overall_sampling = 3; } // A virtual cluster is a way of specifying a regex matching rule against @@ -1097,14 +1101,14 @@ message VirtualCluster { // // Examples: // - // * The regex */rides/\d+* matches the path */rides/0* - // * The regex */rides/\d+* matches the path */rides/123* - // * The regex */rides/\d+* does not match the path */rides/123/456* + // * The regex ``/rides/\d+`` matches the path */rides/0* + // * The regex ``/rides/\d+`` matches the path */rides/123* + // * The regex ``/rides/\d+`` does not match the path */rides/123/456* // // .. attention:: // This field has been deprecated in favor of `headers` as it is not safe for use with // untrusted input in all cases. 
- string pattern = 1 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string pattern = 1 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // Specifies a list of header matchers to use for matching requests. Each specified header must // match. The pseudo-headers `:path` and `:method` can be used to match the request path and @@ -1114,7 +1118,7 @@ message VirtualCluster { // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string.min_bytes = 1]; + string name = 2 [(validate.rules).string = {min_bytes: 1}]; // Optionally specifies the HTTP method to match on. For example GET, PUT, // etc. @@ -1126,18 +1130,6 @@ message VirtualCluster { // Global rate limiting :ref:`architecture overview `. message RateLimit { - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32.lte = 10]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - message Action { // The following descriptor entry is appended to the descriptor: // @@ -1178,10 +1170,10 @@ message RateLimit { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. - string header_name = 1 [(validate.rules).string.min_bytes = 1]; + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The key to use in the descriptor entry. 
- string descriptor_key = 2 [(validate.rules).string.min_bytes = 1]; + string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor and is populated using the @@ -1200,7 +1192,7 @@ message RateLimit { // ("generic_key", "") message GenericKey { // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor: @@ -1210,7 +1202,7 @@ message RateLimit { // ("header_match", "") message HeaderValueMatch { // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a @@ -1223,7 +1215,7 @@ message RateLimit { // specified headers in the config. A match will happen if all the // headers in the config are present in the request with the same values // (or based on presence if the value field is not in the config). - repeated HeaderMatcher headers = 3 [(validate.rules).repeated .min_items = 1]; + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } oneof action_specifier { @@ -1249,13 +1241,25 @@ message RateLimit { } } + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. 
+ string disable_key = 2; + // A list of actions that are to be applied for this rate limit configuration. // Order matters as the actions are processed sequentially and the descriptor // is composed by appending descriptor entries in that sequence. If an action // cannot append a descriptor entry, no descriptor is generated for the // configuration. See :ref:`composing actions // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated .min_items = 1]; + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; } // .. attention:: @@ -1283,14 +1287,10 @@ message RateLimit { // // [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] message HeaderMatcher { - // Specifies the name of the header in the request. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - reserved 2; // value deprecated by :ref:`exact_match - // ` + reserved 2, 3; - reserved 3; // regex deprecated by :ref:`regex_match - // ` + // Specifies the name of the header in the request. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { @@ -1304,14 +1304,14 @@ message HeaderMatcher { // // Examples: // - // * The regex *\d{3}* matches the value *123* - // * The regex *\d{3}* does not match the value *1234* - // * The regex *\d{3}* does not match the value *123.456* + // * The regex ``\d{3}`` matches the value *123* + // * The regex ``\d{3}`` does not match the value *1234* + // * The regex ``\d{3}`` does not match the value *123.456* // // .. attention:: // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use // with untrusted input in all cases. 
- string regex_match = 5 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex_match = 5 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the @@ -1329,7 +1329,7 @@ message HeaderMatcher { // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" - envoy.type.Int64Range range_match = 6; + type.Int64Range range_match = 6; // If specified, header match will be performed based on whether the header is in the // request. @@ -1341,7 +1341,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string.min_bytes = 1]; + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1349,14 +1349,14 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string.min_bytes = 1]; + string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. // // Examples: // - // * The regex *\d{3}* does not match the value *1234*, so it will match when inverted. + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. // * The range [-10,0) will match the value -1, so it will not match when inverted. bool invert_match = 8; } @@ -1366,7 +1366,7 @@ message HeaderMatcher { message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. 
- string name = 1 [(validate.rules).string = {min_bytes: 1, max_bytes: 1024}]; + string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; // Specifies the value of the key. If the value is absent, a request // that contains the key in its query string will match, whether the @@ -1379,7 +1379,7 @@ message QueryParameterMatcher { // Specifies whether the query parameter value is a regular expression. // Defaults to false. The entire query parameter value (i.e., the part to // the right of the equals sign in "key=value") must match the regex. - // E.g., the regex "\d+$" will match "123" but not "a123" or "123a". + // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. // // ..attention:: // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. @@ -1387,7 +1387,7 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.StringMatcher string_match = 5 [(validate.rules).message.required = true]; + type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. 
bool present_match = 6; diff --git a/api/envoy/api/v3alpha/srds.proto b/api/envoy/api/v3alpha/srds.proto index 636ba3917ecc..63195aaf63c0 100644 --- a/api/envoy/api/v3alpha/srds.proto +++ b/api/envoy/api/v3alpha/srds.proto @@ -2,15 +2,17 @@ syntax = "proto3"; package envoy.api.v3alpha; -import "envoy/api/v3alpha/discovery.proto"; -import "google/api/annotations.proto"; -import "validate/validate.proto"; - option java_outer_classname = "SrdsProto"; -option java_package = "io.envoyproxy.envoy.api.v3alpha"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.api.v3alpha"; option java_generic_services = true; +import "envoy/api/v3alpha/discovery.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` // @@ -99,9 +101,6 @@ service ScopedRoutesDiscoveryService { // [#comment:next free field: 4] // [#proto-status: experimental] message ScopedRouteConfiguration { - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string.min_bytes = 1]; - // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` // specified in the HttpConnectionManager. The matching is done per HTTP @@ -120,14 +119,17 @@ message ScopedRouteConfiguration { // The ordered set of fragments to match against. The order must match the // fragments in the corresponding // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated .min_items = 1]; + repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated // with this scope. 
- string route_configuration_name = 2 [(validate.rules).string.min_bytes = 1]; + string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; // The key to match against. - Key key = 3 [(validate.rules).message.required = true]; + Key key = 3 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto index c02835dbbc56..1f7a0e61d42c 100644 --- a/api/envoy/config/accesslog/v2/als.proto +++ b/api/envoy/config/accesslog/v2/als.proto @@ -20,7 +20,7 @@ import "validate/validate.proto"; // populate :ref:`StreamAccessLogsMessage.http_logs // `. message HttpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers // `. @@ -38,7 +38,7 @@ message HttpGrpcAccessLogConfig { // Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will // populate *StreamAccessLogsMessage.tcp_logs*. message TcpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; } // Common configuration for gRPC access logs. @@ -46,15 +46,15 @@ message CommonGrpcAccessLogConfig { // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier // `. This allows the // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string.min_bytes = 1]; + string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The gRPC service for the access log service. 
- envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it diff --git a/api/envoy/config/accesslog/v2/file.proto b/api/envoy/config/accesslog/v2/file.proto index b88529a3251d..9ed71469882b 100644 --- a/api/envoy/config/accesslog/v2/file.proto +++ b/api/envoy/config/accesslog/v2/file.proto @@ -6,9 +6,10 @@ option java_outer_classname = "FileProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -import "validate/validate.proto"; import "google/protobuf/struct.proto"; +import "validate/validate.proto"; + // [#protodoc-title: File access log] // Custom configuration for an :ref:`AccessLog ` @@ -16,7 +17,7 @@ import "google/protobuf/struct.proto"; // AccessLog. message FileAccessLog { // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string.min_bytes = 1]; + string path = 1 [(validate.rules).string = {min_bytes: 1}]; // Access log format. 
Envoy supports :ref:`custom access log formats // ` as well as a :ref:`default format diff --git a/api/envoy/config/accesslog/v3alpha/als.proto b/api/envoy/config/accesslog/v3alpha/als.proto index 07ec724d10ef..c7fa8da334f9 100644 --- a/api/envoy/config/accesslog/v3alpha/als.proto +++ b/api/envoy/config/accesslog/v3alpha/als.proto @@ -20,7 +20,7 @@ import "validate/validate.proto"; // will populate :ref:`StreamAccessLogsMessage.http_logs // `. message HttpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers // `. @@ -38,7 +38,7 @@ message HttpGrpcAccessLogConfig { // Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will // populate *StreamAccessLogsMessage.tcp_logs*. message TcpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; } // Common configuration for gRPC access logs. @@ -46,15 +46,15 @@ message CommonGrpcAccessLogConfig { // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier // `. This allows the // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string.min_bytes = 1]; + string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The gRPC service for the access log service. - envoy.api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; // Interval for flushing access logs to the gRPC stream. 
Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it diff --git a/api/envoy/config/accesslog/v3alpha/file.proto b/api/envoy/config/accesslog/v3alpha/file.proto index 2f32da7bb64f..d8b033735e79 100644 --- a/api/envoy/config/accesslog/v3alpha/file.proto +++ b/api/envoy/config/accesslog/v3alpha/file.proto @@ -6,9 +6,10 @@ option java_outer_classname = "FileProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.accesslog.v3alpha"; -import "validate/validate.proto"; import "google/protobuf/struct.proto"; +import "validate/validate.proto"; + // [#protodoc-title: File access log] // Custom configuration for an :ref:`AccessLog @@ -16,7 +17,7 @@ import "google/protobuf/struct.proto"; // file. Configures the built-in *envoy.file_access_log* AccessLog. message FileAccessLog { // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string.min_bytes = 1]; + string path = 1 [(validate.rules).string = {min_bytes: 1}]; // Access log format. Envoy supports :ref:`custom access log formats // ` as well as a :ref:`default format diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index b8f17de61870..07d7285f0060 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -1,8 +1,3 @@ -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v2 configuration. 
See the :ref:`v2 configuration overview -// ` for more detail. - syntax = "proto3"; package envoy.config.bootstrap.v2; @@ -11,31 +6,32 @@ option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/cds.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/auth/cert.proto"; import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/cds.proto"; import "envoy/api/v2/lds.proto"; -import "envoy/config/trace/v2/trace.proto"; import "envoy/config/metrics/v2/stats.proto"; import "envoy/config/overload/v2alpha/overload.proto"; +import "envoy/config/trace/v2/trace.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "validate/validate.proto"; +// [#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + // Bootstrap :ref:`configuration overview `. message Bootstrap { - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - envoy.api.v2.core.Node node = 1; - message StaticResources { // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. - repeated envoy.api.v2.Listener listeners = 1; + repeated api.v2.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary @@ -43,24 +39,24 @@ message Bootstrap { // how to speak to the management server. These cluster definitions may not // use :ref:`EDS ` (i.e. they should be static // IP or DNS-based). 
- repeated envoy.api.v2.Cluster clusters = 2; + repeated api.v2.Cluster clusters = 2; // These static secrets can be used by :ref:`SdsSecretConfig // ` - repeated envoy.api.v2.auth.Secret secrets = 3; + repeated api.v2.auth.Secret secrets = 3; } - // Statically specified resources. - StaticResources static_resources = 2; message DynamicResources { + reserved 4; + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. - envoy.api.v2.core.ConfigSource lds_config = 1; + api.v2.core.ConfigSource lds_config = 1; // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. - envoy.api.v2.core.ConfigSource cds_config = 2; + api.v2.core.ConfigSource cds_config = 2; // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type @@ -69,10 +65,18 @@ message Bootstrap { // :ref:`ConfigSources ` that have // the :ref:`ads ` field set will be // streamed on the ADS channel. - envoy.api.v2.core.ApiConfigSource ads_config = 3; - - reserved 4; + api.v2.core.ApiConfigSource ads_config = 3; } + + reserved 10; + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + api.v2.core.Node node = 1; + + // Statically specified resources. + StaticResources static_resources = 2; + // xDS configuration sources. DynamicResources dynamic_resources = 3; @@ -82,16 +86,16 @@ message Bootstrap { // Health discovery service config option. // (:ref:`core.ApiConfigSource `) - envoy.api.v2.core.ApiConfigSource hds_config = 14; + api.v2.core.ApiConfigSource hds_config = 14; // Optional file system path to search for startup flag files. string flags_path = 5; // Optional set of stats sinks. - repeated envoy.config.metrics.v2.StatsSink stats_sinks = 6; + repeated metrics.v2.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. 
- envoy.config.metrics.v2.StatsConfig stats_config = 13; + metrics.v2.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and @@ -99,8 +103,8 @@ message Bootstrap { // seconds). // Duration must be at least 1ms and at most 5 min. google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt: {seconds: 300}, - gte: {nanos: 1000000} + lt {seconds: 300} + gte {nanos: 1000000} }]; // Optional watchdog configuration. @@ -108,9 +112,7 @@ message Bootstrap { // Configuration for an external tracing provider. If not specified, no // tracing will be performed. - envoy.config.trace.v2.Tracing tracing = 9; - - reserved 10; + trace.v2.Tracing tracing = 9; // Configuration for the runtime configuration provider (deprecated). If not // specified, a “null” provider will be used which will result in all defaults @@ -126,7 +128,7 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - envoy.config.overload.v2alpha.OverloadManager overload_manager = 15; + overload.v2alpha.OverloadManager overload_manager = 15; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This @@ -162,15 +164,20 @@ message Admin { // The TCP address that the administration server will listen on. // If not specified, Envoy will not start an administration server. - envoy.api.v2.core.Address address = 3; + api.v2.core.Address address = 3; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. - repeated envoy.api.v2.core.SocketOption socket_options = 4; + repeated api.v2.core.SocketOption socket_options = 4; } // Cluster manager :ref:`architecture overview `. message ClusterManager { + message OutlierDetection { + // Specifies the path to the outlier event log. 
+ string event_log_path = 1; + } + // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. @@ -182,33 +189,30 @@ message ClusterManager { // routing `_. string local_cluster_name = 1; - message OutlierDetection { - // Specifies the path to the outlier event log. - string event_log_path = 1; - } // Optional global configuration for outlier detection. OutlierDetection outlier_detection = 2; // Optional configuration used to bind newly established upstream connections. // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - envoy.api.v2.core.BindConfig upstream_bind_config = 3; + api.v2.core.BindConfig upstream_bind_config = 3; // A management server endpoint to stream load stats to via // *StreamLoadStats*. This must have :ref:`api_type // ` :ref:`GRPC // `. - envoy.api.v2.core.ApiConfigSource load_stats_config = 4; + api.v2.core.ApiConfigSource load_stats_config = 4; } // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. +// See the :ref:`watchdog documentation ` for more information. message Watchdog { // The duration after which Envoy counts a nonresponsive thread in the - // *server.watchdog_miss* statistic. If not specified the default is 200ms. + // *watchdog_miss* statistic. If not specified the default is 200ms. google.protobuf.Duration miss_timeout = 1; // The duration after which Envoy counts a nonresponsive thread in the - // *server.watchdog_mega_miss* statistic. If not specified the default is + // *watchdog_mega_miss* statistic. If not specified the default is // 1000ms. google.protobuf.Duration megamiss_timeout = 2; @@ -285,23 +289,26 @@ message RuntimeLayer { string name = 1; // RTDS configuration source. 
- envoy.api.v2.core.ConfigSource rtds_config = 2; + api.v2.core.ConfigSource rtds_config = 2; } // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof layer_specifier { + option (validate.required) = true; + // :ref:`Static runtime ` layer. // This follows the :ref:`runtime protobuf JSON representation encoding // `. Unlike static xDS resources, this static // layer is overridable by later layers in the runtime virtual filesystem. - option (validate.required) = true; - google.protobuf.Struct static_layer = 2; + DiskLayer disk_layer = 3; + AdminLayer admin_layer = 4; + RtdsLayer rtds_layer = 5; } } diff --git a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto index f3ac2e8342ea..4b26872f945c 100644 --- a/api/envoy/config/bootstrap/v3alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3alpha/bootstrap.proto @@ -1,8 +1,3 @@ -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v2 configuration. See the :ref:`v2 configuration overview -// ` for more detail. 
- syntax = "proto3"; package envoy.config.bootstrap.v3alpha; @@ -11,55 +6,56 @@ option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.bootstrap.v3alpha"; +import "envoy/api/v3alpha/auth/cert.proto"; +import "envoy/api/v3alpha/cds.proto"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/core/base.proto"; -import "envoy/api/v3alpha/auth/cert.proto"; import "envoy/api/v3alpha/core/config_source.proto"; -import "envoy/api/v3alpha/cds.proto"; import "envoy/api/v3alpha/lds.proto"; -import "envoy/config/trace/v3alpha/trace.proto"; import "envoy/config/metrics/v3alpha/stats.proto"; import "envoy/config/overload/v3alpha/overload.proto"; +import "envoy/config/trace/v3alpha/trace.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "validate/validate.proto"; +// [#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + // Bootstrap :ref:`configuration overview `. message Bootstrap { - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - envoy.api.v3alpha.core.Node node = 1; - message StaticResources { // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. - repeated envoy.api.v3alpha.Listener listeners = 1; + repeated api.v3alpha.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's // necessary to have some initial cluster definitions available to allow Envoy to know how to // speak to the management server. These cluster definitions may not use :ref:`EDS // ` (i.e. they should be static IP or DNS-based). 
- repeated envoy.api.v3alpha.Cluster clusters = 2; + repeated api.v3alpha.Cluster clusters = 2; // These static secrets can be used by :ref:`SdsSecretConfig // ` - repeated envoy.api.v3alpha.auth.Secret secrets = 3; + repeated api.v3alpha.auth.Secret secrets = 3; } - // Statically specified resources. - StaticResources static_resources = 2; message DynamicResources { + reserved 4; + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. - envoy.api.v3alpha.core.ConfigSource lds_config = 1; + api.v3alpha.core.ConfigSource lds_config = 1; // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. - envoy.api.v3alpha.core.ConfigSource cds_config = 2; + api.v3alpha.core.ConfigSource cds_config = 2; // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type @@ -68,10 +64,18 @@ message Bootstrap { // :ref:`ConfigSources ` that have // the :ref:`ads ` field set will be // streamed on the ADS channel. - envoy.api.v3alpha.core.ApiConfigSource ads_config = 3; - - reserved 4; + api.v3alpha.core.ApiConfigSource ads_config = 3; } + + reserved 10; + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + api.v3alpha.core.Node node = 1; + + // Statically specified resources. + StaticResources static_resources = 2; + // xDS configuration sources. DynamicResources dynamic_resources = 3; @@ -81,16 +85,16 @@ message Bootstrap { // Health discovery service config option. // (:ref:`core.ApiConfigSource `) - envoy.api.v3alpha.core.ApiConfigSource hds_config = 14; + api.v3alpha.core.ApiConfigSource hds_config = 14; // Optional file system path to search for startup flag files. string flags_path = 5; // Optional set of stats sinks. 
- repeated envoy.config.metrics.v3alpha.StatsSink stats_sinks = 6; + repeated metrics.v3alpha.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. - envoy.config.metrics.v3alpha.StatsConfig stats_config = 13; + metrics.v3alpha.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and @@ -98,8 +102,8 @@ message Bootstrap { // seconds). // Duration must be at least 1ms and at most 5 min. google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt: {seconds: 300}, - gte: {nanos: 1000000} + lt {seconds: 300} + gte {nanos: 1000000} }]; // Optional watchdog configuration. @@ -107,9 +111,7 @@ message Bootstrap { // Configuration for an external tracing provider. If not specified, no // tracing will be performed. - envoy.config.trace.v3alpha.Tracing tracing = 9; - - reserved 10; + trace.v3alpha.Tracing tracing = 9; // Configuration for the runtime configuration provider (deprecated). If not // specified, a “null” provider will be used which will result in all defaults @@ -125,7 +127,7 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - envoy.config.overload.v3alpha.OverloadManager overload_manager = 15; + overload.v3alpha.OverloadManager overload_manager = 15; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This @@ -161,15 +163,20 @@ message Admin { // The TCP address that the administration server will listen on. // If not specified, Envoy will not start an administration server. - envoy.api.v3alpha.core.Address address = 3; + api.v3alpha.core.Address address = 3; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. 
- repeated envoy.api.v3alpha.core.SocketOption socket_options = 4; + repeated api.v3alpha.core.SocketOption socket_options = 4; } // Cluster manager :ref:`architecture overview `. message ClusterManager { + message OutlierDetection { + // Specifies the path to the outlier event log. + string event_log_path = 1; + } + // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. @@ -181,33 +188,30 @@ message ClusterManager { // `_. string local_cluster_name = 1; - message OutlierDetection { - // Specifies the path to the outlier event log. - string event_log_path = 1; - } // Optional global configuration for outlier detection. OutlierDetection outlier_detection = 2; // Optional configuration used to bind newly established upstream connections. // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - envoy.api.v3alpha.core.BindConfig upstream_bind_config = 3; + api.v3alpha.core.BindConfig upstream_bind_config = 3; // A management server endpoint to stream load stats to via // *StreamLoadStats*. This must have :ref:`api_type // ` :ref:`GRPC // `. - envoy.api.v3alpha.core.ApiConfigSource load_stats_config = 4; + api.v3alpha.core.ApiConfigSource load_stats_config = 4; } // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. +// See the :ref:`watchdog documentation ` for more information. message Watchdog { // The duration after which Envoy counts a nonresponsive thread in the - // *server.watchdog_miss* statistic. If not specified the default is 200ms. + // *watchdog_miss* statistic. If not specified the default is 200ms. google.protobuf.Duration miss_timeout = 1; // The duration after which Envoy counts a nonresponsive thread in the - // *server.watchdog_mega_miss* statistic. 
If not specified the default is + // *watchdog_mega_miss* statistic. If not specified the default is // 1000ms. google.protobuf.Duration megamiss_timeout = 2; @@ -284,23 +288,26 @@ message RuntimeLayer { string name = 1; // RTDS configuration source. - envoy.api.v3alpha.core.ConfigSource rtds_config = 2; + api.v3alpha.core.ConfigSource rtds_config = 2; } // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof layer_specifier { + option (validate.required) = true; + // :ref:`Static runtime ` layer. // This follows the :ref:`runtime protobuf JSON representation encoding // `. Unlike static xDS resources, this static // layer is overridable by later layers in the runtime virtual filesystem. - option (validate.required) = true; - google.protobuf.Struct static_layer = 2; + DiskLayer disk_layer = 3; + AdminLayer admin_layer = 4; + RtdsLayer rtds_layer = 5; } } diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto index c6d47807ce50..9e4626c23e89 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.cluster.dynamic_forward_proxy.v2alpha; -option java_outer_classname = "DynamicForwardProxyClusterProto"; +option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; @@ -19,5 +19,5 @@ message ClusterConfig { // match that of associated :ref:`dynamic forward proxy HTTP filter configuration // `. 
common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto index 6bc7bdd4c551..be96cbf83a81 100644 --- a/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto +++ b/api/envoy/config/cluster/dynamic_forward_proxy/v3alpha/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.cluster.dynamic_forward_proxy.v3alpha; -option java_outer_classname = "DynamicForwardProxyClusterProto"; +option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v3alpha"; @@ -19,5 +19,5 @@ message ClusterConfig { // match that of associated :ref:`dynamic forward proxy HTTP filter configuration // `. common.dynamic_forward_proxy.v3alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index fabaa0274fb7..c418de9f54e4 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -43,8 +43,8 @@ import "validate/validate.proto"; message RedisClusterConfig { // Interval between successive topology refresh requests. If not set, this defaults to 5s. - google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; // Timeout for topology refresh request. If not set, this defaults to 3s. 
- google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; } diff --git a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto index f7c796fe90f6..7d9b7d329eca 100644 --- a/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto +++ b/api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto @@ -23,7 +23,7 @@ message DnsCacheConfig { // configurations with the same name *must* otherwise have the same settings when referenced // from different configuration components. Configuration will fail to load if this is not // the case. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The DNS lookup family to use during resolution. // @@ -33,7 +33,8 @@ message DnsCacheConfig { // likely build a "happy eyeballs" connection pool which would race the primary / fall back // address and return the one that wins. This same method could potentially also be used for // QUIC to TCP fall back.] - api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2 [(validate.rules).enum.defined_only = true]; + api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2 + [(validate.rules).enum = {defined_only: true}]; // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. // @@ -41,7 +42,7 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; // The TTL for hosts that are unused. Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. 
@@ -55,7 +56,7 @@ message DnsCacheConfig { // .. note: // // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. // @@ -64,5 +65,5 @@ message DnsCacheConfig { // The implementation is approximate and enforced independently on each worker thread, thus // it is possible for the maximum hosts in the cache to go slightly above the configured // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; } diff --git a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto index 7b8a67be4333..10852816e44d 100644 --- a/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto +++ b/api/envoy/config/common/dynamic_forward_proxy/v3alpha/dns_cache.proto @@ -23,7 +23,7 @@ message DnsCacheConfig { // configurations with the same name *must* otherwise have the same settings when referenced // from different configuration components. Configuration will fail to load if this is not // the case. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The DNS lookup family to use during resolution. // @@ -34,7 +34,7 @@ message DnsCacheConfig { // address and return the one that wins. This same method could potentially also be used for // QUIC to TCP fall back.] 
api.v3alpha.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. // @@ -42,7 +42,7 @@ message DnsCacheConfig { // // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be // added in a future change. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {}}]; // The TTL for hosts that are unused. Hosts that have not been used in the configured time // interval will be purged. If not specified defaults to 5m. @@ -56,7 +56,7 @@ message DnsCacheConfig { // .. note: // // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. // @@ -65,5 +65,5 @@ message DnsCacheConfig { // The implementation is approximate and enforced independently on each worker thread, thus // it is possible for the maximum hosts in the cache to go slightly above the configured // value depending on timing. This is similar to how other circuit breakers work. 
- google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; } diff --git a/api/envoy/config/common/tap/v2alpha/common.proto b/api/envoy/config/common/tap/v2alpha/common.proto index ac640b83e4fb..391bed13c69c 100644 --- a/api/envoy/config/common/tap/v2alpha/common.proto +++ b/api/envoy/config/common/tap/v2alpha/common.proto @@ -1,28 +1,27 @@ syntax = "proto3"; -import "envoy/service/tap/v2alpha/common.proto"; -import "envoy/api/v2/core/config_source.proto"; - -import "validate/validate.proto"; - package envoy.config.common.tap.v2alpha; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; +import "envoy/api/v2/core/config_source.proto"; +import "envoy/service/tap/v2alpha/common.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Common tap extension configuration] // Common configuration for all tap extensions. message CommonExtensionConfig { - // [#not-implemented-hide:] message TapDSConfig { // Configuration for the source of TapDS updates for this Cluster. - envoy.api.v2.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string.min_bytes = 1]; + string name = 2 [(validate.rules).string = {min_bytes: 1}]; } oneof config_type { @@ -45,5 +44,5 @@ message CommonExtensionConfig { message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. 
- string config_id = 1 [(validate.rules).string.min_bytes = 1]; + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/common/tap/v3alpha/common.proto b/api/envoy/config/common/tap/v3alpha/common.proto index c260d04afa15..74c23b9c0123 100644 --- a/api/envoy/config/common/tap/v3alpha/common.proto +++ b/api/envoy/config/common/tap/v3alpha/common.proto @@ -1,29 +1,27 @@ syntax = "proto3"; -import "envoy/service/tap/v3alpha/common.proto"; -import "envoy/api/v3alpha/core/config_source.proto"; - -import "validate/validate.proto"; - package envoy.config.common.tap.v3alpha; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.common.tap.v3alpha"; +import "envoy/api/v3alpha/core/config_source.proto"; +import "envoy/service/tap/v3alpha/common.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Common tap extension configuration] // Common configuration for all tap extensions. message CommonExtensionConfig { - // [#not-implemented-hide:] message TapDSConfig { // Configuration for the source of TapDS updates for this Cluster. - envoy.api.v3alpha.core.ConfigSource config_source = 1 - [(validate.rules).message.required = true]; + api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string.min_bytes = 1]; + string name = 2 [(validate.rules).string = {min_bytes: 1}]; } oneof config_type { @@ -46,5 +44,5 @@ message CommonExtensionConfig { message AdminConfig { // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. 
- string config_id = 1 [(validate.rules).string.min_bytes = 1]; + string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto index d777708175b5..8810e050e95f 100644 --- a/api/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto @@ -98,22 +98,22 @@ message ComparisonFilter { } // Comparison operator. - Op op = 1 [(validate.rules).enum.defined_only = true]; + Op op = 1 [(validate.rules).enum = {defined_only: true}]; // Value to compare against. - envoy.api.v2.core.RuntimeUInt32 value = 2; + api.v2.core.RuntimeUInt32 value = 2; } // Filters on HTTP response/status code. message StatusCodeFilter { // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters on total request duration in milliseconds. message DurationFilter { // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters for requests that are not health check requests. A health check @@ -130,10 +130,10 @@ message TraceableFilter { message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. // If found in runtime, this value will replace the default numerator. - string runtime_key = 1 [(validate.rules).string.min_bytes = 1]; + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - envoy.type.FractionalPercent percent_sampled = 2; + type.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header // :ref:`x-request-id` being present. 
If @@ -154,21 +154,21 @@ message RuntimeFilter { // Filters are evaluated sequentially and if one of them returns false, the // filter returns false immediately. message AndFilter { - repeated AccessLogFilter filters = 1 [(validate.rules).repeated .min_items = 2]; + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; } // Performs a logical “or” operation on the result of each individual filter. // Filters are evaluated sequentially and if one of them returns true, the // filter returns true immediately. message OrFilter { - repeated AccessLogFilter filters = 2 [(validate.rules).repeated .min_items = 2]; + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; } // Filters requests based on the presence or value of a request header. message HeaderFilter { // Only requests with a header which matches the specified HeaderMatcher will pass the filter // check. - envoy.api.v2.route.HeaderMatcher header = 1 [(validate.rules).message.required = true]; + api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. @@ -178,27 +178,29 @@ message ResponseFlagFilter { // Only responses with the any of the flags listed in this field will be logged. // This field is optional. If it is not specified, then any response flag will pass // the filter check. 
- repeated string flags = 1 [(validate.rules).repeated .items.string = { - in: [ - "LH", - "UH", - "UT", - "LR", - "UR", - "UF", - "UC", - "UO", - "NR", - "DI", - "FI", - "RL", - "UAEX", - "RLSE", - "DC", - "URX", - "SI", - "IH" - ] + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + } + } }]; } @@ -226,7 +228,7 @@ message GrpcStatusFilter { } // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated .items.enum.defined_only = true]; + repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; // If included and set to true, the filter will instead block all responses with a gRPC status or // inferred gRPC status enumerated in statuses, and allow all other responses. @@ -242,6 +244,7 @@ message ExtensionFilter { // Custom configuration that depends on the filter being instantiated. oneof config_type { google.protobuf.Struct config = 2; + google.protobuf.Any typed_config = 3; } } diff --git a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto index b7beef0bd974..4d3a07952632 100644 --- a/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto +++ b/api/envoy/config/filter/accesslog/v3alpha/accesslog.proto @@ -98,22 +98,22 @@ message ComparisonFilter { } // Comparison operator. - Op op = 1 [(validate.rules).enum.defined_only = true]; + Op op = 1 [(validate.rules).enum = {defined_only: true}]; // Value to compare against. - envoy.api.v3alpha.core.RuntimeUInt32 value = 2; + api.v3alpha.core.RuntimeUInt32 value = 2; } // Filters on HTTP response/status code. message StatusCodeFilter { // Comparison. 
- ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters on total request duration in milliseconds. message DurationFilter { // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters for requests that are not health check requests. A health check @@ -130,10 +130,10 @@ message TraceableFilter { message RuntimeFilter { // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. // If found in runtime, this value will replace the default numerator. - string runtime_key = 1 [(validate.rules).string.min_bytes = 1]; + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - envoy.type.FractionalPercent percent_sampled = 2; + type.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header // :ref:`x-request-id` being present. If @@ -154,21 +154,21 @@ message RuntimeFilter { // Filters are evaluated sequentially and if one of them returns false, the // filter returns false immediately. message AndFilter { - repeated AccessLogFilter filters = 1 [(validate.rules).repeated .min_items = 2]; + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; } // Performs a logical “or” operation on the result of each individual filter. // Filters are evaluated sequentially and if one of them returns true, the // filter returns true immediately. message OrFilter { - repeated AccessLogFilter filters = 2 [(validate.rules).repeated .min_items = 2]; + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; } // Filters requests based on the presence or value of a request header. 
message HeaderFilter { // Only requests with a header which matches the specified HeaderMatcher will pass the filter // check. - envoy.api.v3alpha.route.HeaderMatcher header = 1 [(validate.rules).message.required = true]; + api.v3alpha.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. @@ -178,27 +178,29 @@ message ResponseFlagFilter { // Only responses with the any of the flags listed in this field will be logged. // This field is optional. If it is not specified, then any response flag will pass // the filter check. - repeated string flags = 1 [(validate.rules).repeated .items.string = { - in: [ - "LH", - "UH", - "UT", - "LR", - "UR", - "UF", - "UC", - "UO", - "NR", - "DI", - "FI", - "RL", - "UAEX", - "RLSE", - "DC", - "URX", - "SI", - "IH" - ] + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + } + } }]; } @@ -226,7 +228,7 @@ message GrpcStatusFilter { } // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated .items.enum.defined_only = true]; + repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; // If included and set to true, the filter will instead block all responses with a gRPC status or // inferred gRPC status enumerated in statuses, and allow all other responses. @@ -242,6 +244,7 @@ message ExtensionFilter { // Custom configuration that depends on the filter being instantiated. 
oneof config_type { google.protobuf.Struct config = 2; + google.protobuf.Any typed_config = 3; } } diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto index 15164172dcf4..41b4a9f09600 100644 --- a/api/envoy/config/filter/fault/v2/fault.proto +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -17,22 +17,22 @@ import "validate/validate.proto"; // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. message FaultDelay { + enum FaultDelayType { + // Unused and deprecated. + FIXED = 0; + } + // Fault delays are controlled via an HTTP header (if applicable). See the // :ref:`http fault filter ` documentation for // more information. message HeaderDelay { } - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } + reserved 2; // Unused and deprecated. Will be removed in the next release. FaultDelayType type = 1 [deprecated = true]; - reserved 2; - oneof fault_delay_secifier { option (validate.required) = true; @@ -42,7 +42,7 @@ message FaultDelay { // delay will be injected before a new request/operation. For TCP // connections, the proxying of the connection upstream will be delayed // for the specified period. This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; @@ -57,7 +57,7 @@ message FaultRateLimit { // Describes a fixed/constant rate limit. message FixedLimit { // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64.gte = 1]; + uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; } // Rate limits are controlled via an HTTP header (if applicable). 
See the diff --git a/api/envoy/config/filter/fault/v3alpha/fault.proto b/api/envoy/config/filter/fault/v3alpha/fault.proto index 21e0f9e12e67..19235b085f03 100644 --- a/api/envoy/config/filter/fault/v3alpha/fault.proto +++ b/api/envoy/config/filter/fault/v3alpha/fault.proto @@ -17,22 +17,22 @@ import "validate/validate.proto"; // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. message FaultDelay { + enum FaultDelayType { + // Unused and deprecated. + FIXED = 0; + } + // Fault delays are controlled via an HTTP header (if applicable). See the // :ref:`http fault filter ` documentation for // more information. message HeaderDelay { } - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } + reserved 2; // Unused and deprecated. Will be removed in the next release. FaultDelayType type = 1 [deprecated = true]; - reserved 2; - oneof fault_delay_secifier { option (validate.required) = true; @@ -42,7 +42,7 @@ message FaultDelay { // delay will be injected before a new request/operation. For TCP // connections, the proxying of the connection upstream will be delayed // for the specified period. This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration.gt = {}]; + google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; @@ -57,7 +57,7 @@ message FaultRateLimit { // Describes a fixed/constant rate limit. message FixedLimit { // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64.gte = 1]; + uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; } // Rate limits are controlled via an HTTP header (if applicable). 
See the diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD new file mode 100644 index 000000000000..a02fc542756c --- /dev/null +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/BUILD @@ -0,0 +1,19 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/api/v3alpha/core", + "//envoy/type", + ], +) + +api_proto_library_internal( + name = "adaptive_concurrency", + srcs = ["adaptive_concurrency.proto"], + deps = [ + "//envoy/api/v3alpha/core:base", + "//envoy/type:percent", + ], +) diff --git a/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto new file mode 100644 index 000000000000..bd3e16ae5202 --- /dev/null +++ b/api/envoy/config/filter/http/adaptive_concurrency/v3alpha/adaptive_concurrency.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package envoy.config.filter.http.adaptive_concurrency.v3alpha; + +option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v3alpha"; +option java_outer_classname = "AdaptiveConcurrencyProto"; +option java_multiple_files = true; + +import "envoy/type/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/api/annotations.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// Configuration parameters for the gradient controller. +message GradientControllerConfig { + // The percentile to use when summarizing aggregated samples. Defaults to p50. + envoy.type.Percent sample_aggregate_percentile = 1; + + // Parameters controlling the periodic recalculation of the concurrency limit from sampled request + // latencies. 
+ message ConcurrencyLimitCalculationParams { + // The maximum value the gradient is allowed to take. This influences how aggressively the + // concurrency limit can increase. Defaults to 2.0. + google.protobuf.DoubleValue max_gradient = 1 [(validate.rules).double.gt = 1.0]; + + // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000. + google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32.gt = 0]; + + // The period of time samples are taken to recalculate the concurrency limit. + google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = { + required: true, + gt: {seconds: 0} + }]; + } + ConcurrencyLimitCalculationParams concurrency_limit_params = 2 + [(validate.rules).message.required = true]; + + // Parameters controlling the periodic minRTT recalculation. + message MinimumRTTCalculationParams { + // The time interval between recalculating the minimum request round-trip time. + google.protobuf.Duration interval = 1 [(validate.rules).duration = { + required: true, + gt: {seconds: 0} + }]; + + // The number of requests to aggregate/sample during the minRTT recalculation window before + // updating. Defaults to 50. + google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32.gt = 0]; + }; + MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message.required = true]; +} + +message AdaptiveConcurrency { + oneof concurrency_controller_config { + option (validate.required) = true; + + // Gradient concurrency control will be used. 
+ GradientControllerConfig gradient_controller_config = 1 + [(validate.rules).message.required = true]; + } +} diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto index ce6c0d6d1423..44062f248199 100644 --- a/api/envoy/config/filter/http/buffer/v2/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -14,11 +14,11 @@ import "validate/validate.proto"; // Buffer :ref:`configuration overview `. message Buffer { - reserved 2; // formerly max_request_time + reserved 2; // The maximum request size that the filter will buffer before the connection // manager will stop buffering and return a 413 response. - google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; } message BufferPerRoute { @@ -26,9 +26,9 @@ message BufferPerRoute { option (validate.required) = true; // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool.const = true]; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Override the global configuration of the filter with this new config. - Buffer buffer = 2 [(validate.rules).message.required = true]; + Buffer buffer = 2 [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto b/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto index 9d44f3503229..5bf936743db4 100644 --- a/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto +++ b/api/envoy/config/filter/http/buffer/v3alpha/buffer.proto @@ -14,11 +14,11 @@ import "validate/validate.proto"; // Buffer :ref:`configuration overview `. message Buffer { - reserved 2; // formerly max_request_time + reserved 2; // The maximum request size that the filter will buffer before the connection // manager will stop buffering and return a 413 response. 
- google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; } message BufferPerRoute { @@ -26,9 +26,9 @@ message BufferPerRoute { option (validate.required) = true; // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool.const = true]; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Override the global configuration of the filter with this new config. - Buffer buffer = 2 [(validate.rules).message.required = true]; + Buffer buffer = 2 [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/config/filter/http/csrf/v2/csrf.proto b/api/envoy/config/filter/http/csrf/v2/csrf.proto index b5c78db544a7..df86120c28fd 100644 --- a/api/envoy/config/filter/http/csrf/v2/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v2/csrf.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.filter.http.csrf.v2; -option java_outer_classname = "CsrfPolicyProto"; +option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; @@ -25,8 +25,8 @@ message CsrfPolicy { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v2.core.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message.required = true]; + api.v2.core.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; // Specifies that CSRF policies will be evaluated and tracked, but not enforced. // This is intended to be used when filter_enabled is off. @@ -38,12 +38,12 @@ message CsrfPolicy { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; + api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; // Specifies additional source origins that will be allowed in addition to // the destination origin. 
// // More information on how this can be configured via runtime can be found // :ref:`here `. - repeated envoy.type.matcher.StringMatcher additional_origins = 3; + repeated type.matcher.StringMatcher additional_origins = 3; } diff --git a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto index 7c8416878a74..7913b270ffa7 100644 --- a/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto +++ b/api/envoy/config/filter/http/csrf/v3alpha/csrf.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.filter.http.csrf.v3alpha; -option java_outer_classname = "CsrfPolicyProto"; +option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v3alpha"; @@ -25,8 +25,8 @@ message CsrfPolicy { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v3alpha.core.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message.required = true]; + api.v3alpha.core.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; // Specifies that CSRF policies will be evaluated and tracked, but not enforced. // This is intended to be used when filter_enabled is off. @@ -38,12 +38,12 @@ message CsrfPolicy { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v3alpha.core.RuntimeFractionalPercent shadow_enabled = 2; + api.v3alpha.core.RuntimeFractionalPercent shadow_enabled = 2; // Specifies additional source origins that will be allowed in addition to // the destination origin. // // More information on how this can be configured via runtime can be found // :ref:`here `. 
- repeated envoy.type.matcher.StringMatcher additional_origins = 3; + repeated type.matcher.StringMatcher additional_origins = 3; } diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index c315ddb46515..daa0822341a0 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -19,5 +19,5 @@ message FilterConfig { // match that of associated :ref:`dynamic forward proxy cluster configuration // `. common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto index f60aaae89e2e..a8054cf69836 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto @@ -19,5 +19,5 @@ message FilterConfig { // match that of associated :ref:`dynamic forward proxy cluster configuration // `. 
common.dynamic_forward_proxy.v3alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index 84d4ab19495b..d18cc8440b98 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -9,7 +9,6 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; import "envoy/api/v2/core/http_uri.proto"; - import "envoy/type/http_status.proto"; import "envoy/type/matcher/string.proto"; @@ -22,7 +21,7 @@ message ExtAuthz { // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). - envoy.api.v2.core.GrpcService grpc_service = 1; + api.v2.core.GrpcService grpc_service = 1; // HTTP service configuration (default timeout: 200ms). HttpService http_service = 3; @@ -67,7 +66,7 @@ message ExtAuthz { // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. - envoy.type.HttpStatus status_on_error = 7; + type.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. @@ -90,7 +89,7 @@ message BufferSettings { // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow // `. 
- uint32 max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. // The authorization request will be dispatched and no 413 HTTP error will be returned by the @@ -123,17 +122,14 @@ message BufferSettings { // ` // for details. message HttpService { + reserved 3, 4, 5, 6; + // Sets the HTTP server URI which the authorization requests must be sent to. - envoy.api.v2.core.HttpUri server_uri = 1; + api.v2.core.HttpUri server_uri = 1; // Sets a prefix to the value of authorization request header *Path*. string path_prefix = 2; - reserved 3; - reserved 4; - reserved 5; - reserved 6; - // Settings used for controlling authorization request metadata. AuthorizationRequest authorization_request = 7; @@ -151,25 +147,25 @@ message AuthorizationRequest { // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have // a message body. // - envoy.type.matcher.ListStringMatcher allowed_headers = 1; + type.matcher.ListStringMatcher allowed_headers = 1; // Sets a list of headers that will be included to the request to authorization service. Note that // client request of the same key will be overridden. - repeated envoy.api.v2.core.HeaderValue headers_to_add = 2; + repeated api.v2.core.HeaderValue headers_to_add = 2; } message AuthorizationResponse { // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the original client request. // Note that coexistent headers will be overridden. - envoy.type.matcher.ListStringMatcher allowed_upstream_headers = 1; + type.matcher.ListStringMatcher allowed_upstream_headers = 1; // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. 
Note // that when this list is *not* set, all the authorization response headers, except *Authority // (Host)* will be in the response to the client. When a header is included in this list, *Path*, // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - envoy.type.matcher.ListStringMatcher allowed_client_headers = 2; + type.matcher.ListStringMatcher allowed_client_headers = 2; } // Extra settings on a per virtualhost/route/weighted-cluster level. @@ -179,10 +175,10 @@ message ExtAuthzPerRoute { // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool.const = true]; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message.required = true]; + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto index 113be0256b1f..9f6363ee8af4 100644 --- a/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v3alpha/ext_authz.proto @@ -9,7 +9,6 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v3alpha" import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/core/grpc_service.proto"; import "envoy/api/v3alpha/core/http_uri.proto"; - import "envoy/type/http_status.proto"; import "envoy/type/matcher/string.proto"; @@ -22,7 +21,7 @@ message ExtAuthz { // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). - envoy.api.v3alpha.core.GrpcService grpc_service = 1; + api.v3alpha.core.GrpcService grpc_service = 1; // HTTP service configuration (default timeout: 200ms). 
HttpService http_service = 3; @@ -67,7 +66,7 @@ message ExtAuthz { // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. - envoy.type.HttpStatus status_on_error = 7; + type.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. @@ -90,7 +89,7 @@ message BufferSettings { // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. // The authorization request will be dispatched and no 413 HTTP error will be returned by the @@ -123,17 +122,14 @@ message BufferSettings { // ` // for details. message HttpService { + reserved 3, 4, 5, 6; + // Sets the HTTP server URI which the authorization requests must be sent to. - envoy.api.v3alpha.core.HttpUri server_uri = 1; + api.v3alpha.core.HttpUri server_uri = 1; // Sets a prefix to the value of authorization request header *Path*. string path_prefix = 2; - reserved 3; - reserved 4; - reserved 5; - reserved 6; - // Settings used for controlling authorization request metadata. AuthorizationRequest authorization_request = 7; @@ -151,25 +147,25 @@ message AuthorizationRequest { // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have // a message body. // - envoy.type.matcher.ListStringMatcher allowed_headers = 1; + type.matcher.ListStringMatcher allowed_headers = 1; // Sets a list of headers that will be included to the request to authorization service. 
Note that // client request of the same key will be overridden. - repeated envoy.api.v3alpha.core.HeaderValue headers_to_add = 2; + repeated api.v3alpha.core.HeaderValue headers_to_add = 2; } message AuthorizationResponse { // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the original client request. // Note that coexistent headers will be overridden. - envoy.type.matcher.ListStringMatcher allowed_upstream_headers = 1; + type.matcher.ListStringMatcher allowed_upstream_headers = 1; // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority // (Host)* will be in the response to the client. When a header is included in this list, *Path*, // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - envoy.type.matcher.ListStringMatcher allowed_client_headers = 2; + type.matcher.ListStringMatcher allowed_client_headers = 2; } // Extra settings on a per virtualhost/route/weighted-cluster level. @@ -179,10 +175,10 @@ message ExtAuthzPerRoute { // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool.const = true]; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. 
- CheckSettings check_settings = 2 [(validate.rules).message.required = true]; + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto index 8256690837fc..98c13ff97b40 100644 --- a/api/envoy/config/filter/http/fault/v2/fault.proto +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -24,7 +24,7 @@ message FaultAbort { option (validate.required) = true; // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}]; + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; } // The percentage of requests/operations/connections that will be aborted with the error code @@ -55,7 +55,7 @@ message HTTPFault { // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on // presence if the *value* field is not in the config). - repeated envoy.api.v2.route.HeaderMatcher headers = 4; + repeated api.v2.route.HeaderMatcher headers = 4; // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. diff --git a/api/envoy/config/filter/http/fault/v3alpha/fault.proto b/api/envoy/config/filter/http/fault/v3alpha/fault.proto index 2189e4a4c131..9b897daae834 100644 --- a/api/envoy/config/filter/http/fault/v3alpha/fault.proto +++ b/api/envoy/config/filter/http/fault/v3alpha/fault.proto @@ -24,7 +24,7 @@ message FaultAbort { option (validate.required) = true; // HTTP status code to use to abort the HTTP request. 
- uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}]; + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; } // The percentage of requests/operations/connections that will be aborted with the error code @@ -55,7 +55,7 @@ message HTTPFault { // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on // presence if the *value* field is not in the config). - repeated envoy.api.v3alpha.route.HeaderMatcher headers = 4; + repeated api.v3alpha.route.HeaderMatcher headers = 4; // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto index 02041b87fd27..6b3914751be2 100644 --- a/api/envoy/config/filter/http/gzip/v2/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto @@ -14,12 +14,12 @@ import "validate/validate.proto"; // Gzip :ref:`configuration overview `. message Gzip { - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {gte: 1, lte: 9}]; - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32.gte = 30]; + enum CompressionStrategy { + DEFAULT = 0; + FILTERED = 1; + HUFFMAN = 2; + RLE = 3; + } message CompressionLevel { enum Enum { @@ -29,19 +29,19 @@ message Gzip { } } + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. 
+ google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // Minimum response length, in bytes, which will trigger compression. The default value is 30. + google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32 = {gte: 30}]; + // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. "BEST" provides higher compression at the cost of // higher latency, "SPEED" provides lower compression with minimum impact on response time. // "DEFAULT" provides an optimal result between speed and compression. This field will be set to // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum.defined_only = true]; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } + CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; // A value used for selecting the zlib compression strategy which is directly related to the // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though @@ -49,7 +49,7 @@ message Gzip { // run-length encoding (RLE) is typically used when the content is known for having sequences // which same data occurs many consecutive times. For more information about each strategy, please // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum.defined_only = true]; + CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; // Set of strings that allows specifying which mime-types yield compression; e.g., // application/json, text/html, etc. When this field is not defined, compression will be applied @@ -69,5 +69,5 @@ message Gzip { // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. 
For more details about this parameter, please refer to // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {gte: 9, lte: 15}]; + google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; } diff --git a/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto b/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto index 26e437d48c52..a0833db464e1 100644 --- a/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto +++ b/api/envoy/config/filter/http/gzip/v3alpha/gzip.proto @@ -14,12 +14,12 @@ import "validate/validate.proto"; // Gzip :ref:`configuration overview `. message Gzip { - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {gte: 1, lte: 9}]; - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32.gte = 30]; + enum CompressionStrategy { + DEFAULT = 0; + FILTERED = 1; + HUFFMAN = 2; + RLE = 3; + } message CompressionLevel { enum Enum { @@ -29,19 +29,19 @@ message Gzip { } } + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // Minimum response length, in bytes, which will trigger compression. The default value is 30. + google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32 = {gte: 30}]; + // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. 
"BEST" provides higher compression at the cost of // higher latency, "SPEED" provides lower compression with minimum impact on response time. // "DEFAULT" provides an optimal result between speed and compression. This field will be set to // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum.defined_only = true]; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } + CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; // A value used for selecting the zlib compression strategy which is directly related to the // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though @@ -49,7 +49,7 @@ message Gzip { // run-length encoding (RLE) is typically used when the content is known for having sequences // which same data occurs many consecutive times. For more information about each strategy, please // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum.defined_only = true]; + CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; // Set of strings that allows specifying which mime-types yield compression; e.g., // application/json, text/html, etc. When this field is not defined, compression will be applied @@ -69,5 +69,5 @@ message Gzip { // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. For more details about this parameter, please refer to // zlib manual > deflateInit2. 
- google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {gte: 9, lte: 15}]; + google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; } diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto index 345c5225edf1..6b66a0c89797 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; message Config { enum ValueType { STRING = 0; + NUMBER = 1; // The value is a serialized `protobuf.Value @@ -41,7 +42,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string.min_bytes = 1]; + string key = 2 [(validate.rules).string = {min_bytes: 1}]; // The value to pair with the given key. // @@ -63,7 +64,7 @@ message Config { // A Rule defines what metadata to apply when a header is present or missing. message Rule { // The header that triggers this rule — required. - string header = 1 [(validate.rules).string.min_bytes = 1]; + string header = 1 [(validate.rules).string = {min_bytes: 1}]; // If the header is present, apply this metadata KeyValuePair. 
// diff --git a/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto b/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto index c3811a00577a..9371f35b1227 100644 --- a/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto +++ b/api/envoy/config/filter/http/header_to_metadata/v3alpha/header_to_metadata.proto @@ -18,6 +18,7 @@ import "validate/validate.proto"; message Config { enum ValueType { STRING = 0; + NUMBER = 1; // The value is a serialized `protobuf.Value @@ -41,7 +42,7 @@ message Config { string metadata_namespace = 1; // The key to use within the namespace. - string key = 2 [(validate.rules).string.min_bytes = 1]; + string key = 2 [(validate.rules).string = {min_bytes: 1}]; // The value to pair with the given key. // @@ -63,7 +64,7 @@ message Config { // A Rule defines what metadata to apply when a header is present or missing. message Rule { // The header that triggers this rule — required. - string header = 1 [(validate.rules).string.min_bytes = 1]; + string header = 1 [(validate.rules).string = {min_bytes: 1}]; // If the header is present, apply this metadata KeyValuePair. 
// diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index 9cd572b43709..15a5b7fd3b7e 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -6,23 +6,22 @@ option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - import "envoy/api/v2/route/route.proto"; import "envoy/type/percent.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. message HealthCheck { - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message.required = true]; - reserved 2; - reserved "endpoint"; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; // If operating in pass through mode, the amount of time in milliseconds // that the filter should cache the upstream response. @@ -31,10 +30,10 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. - map cluster_min_healthy_percentages = 4; + map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will // check a request’s headers against all the specified headers. To specify the health check // endpoint, set the ``:path`` header to match on. 
- repeated envoy.api.v2.route.HeaderMatcher headers = 5; + repeated api.v2.route.HeaderMatcher headers = 5; } diff --git a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto index c5e91e703d5b..12d4549fa58b 100644 --- a/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v3alpha/health_check.proto @@ -6,23 +6,22 @@ option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v3alpha"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - import "envoy/api/v3alpha/route/route.proto"; import "envoy/type/percent.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. message HealthCheck { - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message.required = true]; - reserved 2; - reserved "endpoint"; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; // If operating in pass through mode, the amount of time in milliseconds // that the filter should cache the upstream response. @@ -31,10 +30,10 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. - map cluster_min_healthy_percentages = 4; + map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. 
The health check filter will // check a request’s headers against all the specified headers. To specify the health check // endpoint, set the ``:path`` header to match on. - repeated envoy.api.v3alpha.route.HeaderMatcher headers = 5; + repeated api.v3alpha.route.HeaderMatcher headers = 5; } diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto index 92ec469c62ad..ac088d80eaac 100644 --- a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -14,7 +14,6 @@ import "validate/validate.proto"; // IP tagging :ref:`configuration overview `. message IPTagging { - // The type of requests the filter should apply to. The supported types // are internal, external or both. The // :ref:`x-forwarded-for` header is @@ -32,9 +31,6 @@ message IPTagging { EXTERNAL = 2; } - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum.defined_only = true]; - // Supplies the IP tag name and the IP address subnets. message IPTag { // Specifies the IP tag name to apply. @@ -42,11 +38,14 @@ message IPTagging { // A list of IP address subnets that will be tagged with // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated envoy.api.v2.core.CidrRange ip_list = 2; + repeated api.v2.core.CidrRange ip_list = 2; } + // The type of request the filter should apply to. + RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] // The set of IP tags for the filter. 
- repeated IPTag ip_tags = 4 [(validate.rules).repeated .min_items = 1]; + repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto index de7871d9e701..b076b6080227 100644 --- a/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto +++ b/api/envoy/config/filter/http/ip_tagging/v3alpha/ip_tagging.proto @@ -14,7 +14,6 @@ import "validate/validate.proto"; // IP tagging :ref:`configuration overview `. message IPTagging { - // The type of requests the filter should apply to. The supported types // are internal, external or both. The // :ref:`x-forwarded-for` header is @@ -32,9 +31,6 @@ message IPTagging { EXTERNAL = 2; } - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum.defined_only = true]; - // Supplies the IP tag name and the IP address subnets. message IPTag { // Specifies the IP tag name to apply. @@ -42,11 +38,14 @@ message IPTagging { // A list of IP address subnets that will be tagged with // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated envoy.api.v3alpha.core.CidrRange ip_list = 2; + repeated api.v3alpha.core.CidrRange ip_list = 2; } + // The type of request the filter should apply to. + RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] // The set of IP tags for the filter. 
- repeated IPTag ip_tags = 4 [(validate.rules).repeated .min_items = 1]; + repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index c07b780b9649..e2584b137505 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -1,4 +1,3 @@ - syntax = "proto3"; package envoy.config.filter.http.jwt_authn.v2alpha; @@ -10,8 +9,10 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha" import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/http_uri.proto"; import "envoy/api/v2/route/route.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; + import "validate/validate.proto"; // [#protodoc-title: JWT Authentication] @@ -53,7 +54,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string.min_bytes = 1]; + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -107,7 +108,7 @@ message JwtProvider { // local_jwks: // inline_string: ACADADADADA // - envoy.api.v2.core.DataSource local_jwks = 4; + api.v2.core.DataSource local_jwks = 4; } // If false, the JWT is removed in the request after a success verification. If true, the JWT is @@ -193,7 +194,7 @@ message RemoteJwks { // uri: https://www.googleapis.com/oauth2/v1/certs // cluster: jwt.www.googleapis.com|443 // - envoy.api.v2.core.HttpUri http_uri = 1; + api.v2.core.HttpUri http_uri = 1; // Duration after which the cached JWKS should be expired. If not specified, default cache // duration is 5 minutes. 
@@ -203,7 +204,7 @@ message RemoteJwks { // This message specifies a header location to extract JWT token. message JwtHeader { // The HTTP header name. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -288,14 +289,14 @@ message JwtRequirement { // Their results are OR-ed; if any one of them passes, the result is passed message JwtRequirementOrList { // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a list of RequiredProvider. // Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. message JwtRequirementAndList { // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a Jwt requirement for a specific Route condition. @@ -330,7 +331,7 @@ message RequirementRule { // match: // prefix: / // - envoy.api.v2.route.RouteMatch match = 1 [(validate.rules).message.required = true]; + api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. JwtRequirement requires = 2; @@ -355,7 +356,7 @@ message RequirementRule { // jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. message FilterStateRule { // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A map of string keys to requirements. 
The string key is the string value // in the FilterState with the name specified in the *name* field above. diff --git a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto index bc4785e64e51..40751d9ea0a9 100644 --- a/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v3alpha/config.proto @@ -1,4 +1,3 @@ - syntax = "proto3"; package envoy.config.filter.http.jwt_authn.v3alpha; @@ -10,8 +9,10 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v3alpha" import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/core/http_uri.proto"; import "envoy/api/v3alpha/route/route.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; + import "validate/validate.proto"; // [#protodoc-title: JWT Authentication] @@ -53,7 +54,7 @@ message JwtProvider { // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // - string issuer = 1 [(validate.rules).string.min_bytes = 1]; + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, @@ -107,7 +108,7 @@ message JwtProvider { // local_jwks: // inline_string: ACADADADADA // - envoy.api.v3alpha.core.DataSource local_jwks = 4; + api.v3alpha.core.DataSource local_jwks = 4; } // If false, the JWT is removed in the request after a success verification. If true, the JWT is @@ -193,7 +194,7 @@ message RemoteJwks { // uri: https://www.googleapis.com/oauth2/v1/certs // cluster: jwt.www.googleapis.com|443 // - envoy.api.v3alpha.core.HttpUri http_uri = 1; + api.v3alpha.core.HttpUri http_uri = 1; // Duration after which the cached JWKS should be expired. If not specified, default cache // duration is 5 minutes. 
@@ -203,7 +204,7 @@ message RemoteJwks { // This message specifies a header location to extract JWT token. message JwtHeader { // The HTTP header name. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the @@ -288,14 +289,14 @@ message JwtRequirement { // Their results are OR-ed; if any one of them passes, the result is passed message JwtRequirementOrList { // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a list of RequiredProvider. // Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. message JwtRequirementAndList { // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated .min_items = 2]; + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a Jwt requirement for a specific Route condition. @@ -330,7 +331,7 @@ message RequirementRule { // match: // prefix: / // - envoy.api.v3alpha.route.RouteMatch match = 1 [(validate.rules).message.required = true]; + api.v3alpha.route.RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. JwtRequirement requires = 2; @@ -355,7 +356,7 @@ message RequirementRule { // jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. message FilterStateRule { // The filter state name to retrieve the `Router::StringAccessor` object. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto index 6fc7fabc6be3..dae34551a0ac 100644 --- a/api/envoy/config/filter/http/lua/v2/lua.proto +++ b/api/envoy/config/filter/http/lua/v2/lua.proto @@ -16,5 +16,5 @@ message Lua { // further loads code from disk if desired. Note that if JSON configuration is used, the code must // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. - string inline_code = 1 [(validate.rules).string.min_bytes = 1]; + string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/filter/http/lua/v3alpha/lua.proto b/api/envoy/config/filter/http/lua/v3alpha/lua.proto index 934a592678a4..73cfee75561d 100644 --- a/api/envoy/config/filter/http/lua/v3alpha/lua.proto +++ b/api/envoy/config/filter/http/lua/v3alpha/lua.proto @@ -16,5 +16,5 @@ message Lua { // further loads code from disk if desired. Note that if JSON configuration is used, the code must // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. 
- string inline_code = 1 [(validate.rules).string.min_bytes = 1]; + string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto index 5c09b860fc5c..e2de31ea0b8c 100644 --- a/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto @@ -15,7 +15,6 @@ import "validate/validate.proto"; // for the request. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original // source address. The option will not be applied if the mark is 0. diff --git a/api/envoy/config/filter/http/original_src/v3alpha/original_src.proto b/api/envoy/config/filter/http/original_src/v3alpha/original_src.proto index 20bd0e920e26..b4ac0a19074b 100644 --- a/api/envoy/config/filter/http/original_src/v3alpha/original_src.proto +++ b/api/envoy/config/filter/http/original_src/v3alpha/original_src.proto @@ -15,7 +15,6 @@ import "validate/validate.proto"; // for the request. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original // source address. The option will not be applied if the mark is 0. 
diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto index 08189be1df89..1cfd362d86e5 100644 --- a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; message RateLimit { // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string.min_bytes = 1]; + string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configurations to be applied with the same // stage number. If not set, the default stage number is 0. @@ -25,7 +25,7 @@ message RateLimit { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The type of requests the filter should apply to. The supported // types are *internal*, *external* or *both*. A request is considered internal if @@ -53,6 +53,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message.required = true]; + ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto index 091c5d3d337a..8fde8cff211a 100644 --- a/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/http/rate_limit/v3alpha/rate_limit.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; message RateLimit { // The rate limit domain to use when calling the rate limit service. 
- string domain = 1 [(validate.rules).string.min_bytes = 1]; + string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configurations to be applied with the same // stage number. If not set, the default stage number is 0. @@ -25,7 +25,7 @@ message RateLimit { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The type of requests the filter should apply to. The supported // types are *internal*, *external* or *both*. A request is considered internal if @@ -53,6 +53,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - envoy.config.ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message.required = true]; + ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 7 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/rbac/v2/rbac.proto b/api/envoy/config/filter/http/rbac/v2/rbac.proto index 7c9a3c24d017..4bdd8c5f2c9c 100644 --- a/api/envoy/config/filter/http/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v2/rbac.proto @@ -28,8 +28,6 @@ message RBAC { message RBACPerRoute { reserved 1; - reserved "disabled"; - // Override the global configuration of the filter with this new config. // If absent, the global RBAC policy will be disabled for this route. 
RBAC rbac = 2; diff --git a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto index 3ffe04ec3a31..4dc9dab5c1a1 100644 --- a/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/filter/http/rbac/v3alpha/rbac.proto @@ -28,8 +28,6 @@ message RBAC { message RBACPerRoute { reserved 1; - reserved "disabled"; - // Override the global configuration of the filter with this new config. // If absent, the global RBAC policy will be disabled for this route. RBAC rbac = 2; diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto index fd0cadec9631..7543069af029 100644 --- a/api/envoy/config/filter/http/router/v2/router.proto +++ b/api/envoy/config/filter/http/router/v2/router.proto @@ -30,7 +30,7 @@ message Router { // are configured in the same way as access logs, but each log entry represents // an upstream request. Presuming retries are configured, multiple upstream // requests may be made for each downstream (inbound) request. - repeated envoy.config.filter.accesslog.v2.AccessLog upstream_log = 3; + repeated accesslog.v2.AccessLog upstream_log = 3; // Do not add any additional *x-envoy-* headers to requests or responses. 
This // only affects the :ref:`router filter generated *x-envoy-* headers @@ -54,13 +54,15 @@ message Router { // * :ref:`config_http_filters_router_x-envoy-max-retries` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated .items.string = { - in: [ - "x-envoy-upstream-rq-timeout-ms", - "x-envoy-upstream-rq-per-try-timeout-ms", - "x-envoy-max-retries", - "x-envoy-retry-grpc-on", - "x-envoy-retry-on" - ] + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } }]; } diff --git a/api/envoy/config/filter/http/router/v3alpha/router.proto b/api/envoy/config/filter/http/router/v3alpha/router.proto index a4ceae7dc1f7..168bbbd8f2b1 100644 --- a/api/envoy/config/filter/http/router/v3alpha/router.proto +++ b/api/envoy/config/filter/http/router/v3alpha/router.proto @@ -30,7 +30,7 @@ message Router { // are configured in the same way as access logs, but each log entry represents // an upstream request. Presuming retries are configured, multiple upstream // requests may be made for each downstream (inbound) request. - repeated envoy.config.filter.accesslog.v3alpha.AccessLog upstream_log = 3; + repeated accesslog.v3alpha.AccessLog upstream_log = 3; // Do not add any additional *x-envoy-* headers to requests or responses. 
This // only affects the :ref:`router filter generated *x-envoy-* headers @@ -54,13 +54,15 @@ message Router { // * :ref:`config_http_filters_router_x-envoy-max-retries` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated .items.string = { - in: [ - "x-envoy-upstream-rq-timeout-ms", - "x-envoy-upstream-rq-per-try-timeout-ms", - "x-envoy-max-retries", - "x-envoy-retry-grpc-on", - "x-envoy-retry-on" - ] + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } }]; } diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto index 54a67ceddf1c..e75ee5d67540 100644 --- a/api/envoy/config/filter/http/squash/v2/squash.proto +++ b/api/envoy/config/filter/http/squash/v2/squash.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; // [#proto-status: experimental] message Squash { // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // When the filter requests the Squash server to create a DebugAttachment, it will use this // structure as template for the body of the request. 
It can contain reference to environment diff --git a/api/envoy/config/filter/http/squash/v3alpha/squash.proto b/api/envoy/config/filter/http/squash/v3alpha/squash.proto index a1b355e67cb8..432224eca333 100644 --- a/api/envoy/config/filter/http/squash/v3alpha/squash.proto +++ b/api/envoy/config/filter/http/squash/v3alpha/squash.proto @@ -17,7 +17,7 @@ import "validate/validate.proto"; // [#proto-status: experimental] message Squash { // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // When the filter requests the Squash server to create a DebugAttachment, it will use this // structure as template for the body of the request. It can contain reference to environment diff --git a/api/envoy/config/filter/http/tap/v2alpha/tap.proto b/api/envoy/config/filter/http/tap/v2alpha/tap.proto index 10d7bdd1e0d8..ee9027055ab9 100644 --- a/api/envoy/config/filter/http/tap/v2alpha/tap.proto +++ b/api/envoy/config/filter/http/tap/v2alpha/tap.proto @@ -1,15 +1,15 @@ syntax = "proto3"; -import "envoy/config/common/tap/v2alpha/common.proto"; - -import "validate/validate.proto"; - package envoy.config.filter.http.tap.v2alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; +import "envoy/config/common/tap/v2alpha/common.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. @@ -17,5 +17,5 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; message Tap { // Common configuration for the HTTP tap filter. 
common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/tap/v3alpha/tap.proto b/api/envoy/config/filter/http/tap/v3alpha/tap.proto index e92c7d229f5e..f3ec07e10438 100644 --- a/api/envoy/config/filter/http/tap/v3alpha/tap.proto +++ b/api/envoy/config/filter/http/tap/v3alpha/tap.proto @@ -1,15 +1,15 @@ syntax = "proto3"; -import "envoy/config/common/tap/v3alpha/common.proto"; - -import "validate/validate.proto"; - package envoy.config.filter.http.tap.v3alpha; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v3alpha"; +import "envoy/config/common/tap/v3alpha/common.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. @@ -17,5 +17,5 @@ option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v3alpha"; message Tap { // Common configuration for the HTTP tap filter. common.tap.v3alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto index 85f837fa794f..42947918f2db 100644 --- a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -12,27 +12,6 @@ import "validate/validate.proto"; // gRPC-JSON transcoder :ref:`configuration overview `. message GrpcJsonTranscoder { - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. 
- bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - repeated string services = 2 [(validate.rules).repeated .min_items = 1]; - message PrintOptions { // Whether to add spaces, line breaks and indentation to make the JSON // output easy to read. Defaults to false. @@ -53,7 +32,28 @@ message GrpcJsonTranscoder { // generate JSON field names using the ``json_name`` option, or lower camel case, // in that order. Setting this flag will preserve the original field names. Defaults to false. bool preserve_proto_field_names = 4; - }; + } + + oneof descriptor_set { + option (validate.required) = true; + + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + bytes proto_descriptor_bin = 4; + } + + // A list of strings that + // supplies the fully qualified service names (i.e. "package_name.service_name") that + // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, + // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than + // the service names specified here, but they won't be translated. + repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `. 
+ bool convert_grpc_status = 9; } diff --git a/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto b/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto index 630ad245a8a6..542075fe78a5 100644 --- a/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto +++ b/api/envoy/config/filter/http/transcoder/v3alpha/transcoder.proto @@ -12,27 +12,6 @@ import "validate/validate.proto"; // gRPC-JSON transcoder :ref:`configuration overview `. message GrpcJsonTranscoder { - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - repeated string services = 2 [(validate.rules).repeated .min_items = 1]; - message PrintOptions { // Whether to add spaces, line breaks and indentation to make the JSON // output easy to read. Defaults to false. @@ -53,7 +32,28 @@ message GrpcJsonTranscoder { // generate JSON field names using the ``json_name`` option, or lower camel case, // in that order. Setting this flag will preserve the original field names. Defaults to false. bool preserve_proto_field_names = 4; - }; + } + + oneof descriptor_set { + option (validate.required) = true; + + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. 
+ bytes proto_descriptor_bin = 4; + } + + // A list of strings that + // supplies the fully qualified service names (i.e. "package_name.service_name") that + // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, + // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than + // the service names specified here, but they won't be translated. + repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `. + bool convert_grpc_status = 9; } diff --git a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto index 11f55a787fdf..e939e2d8dd97 100644 --- a/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ b/api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto @@ -15,7 +15,6 @@ import "validate/validate.proto"; // for the connection. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { - // Whether to bind the port to the one used in the original downstream connection. // [#not-implemented-warn:] bool bind_port = 1; diff --git a/api/envoy/config/filter/listener/original_src/v3alpha/original_src.proto b/api/envoy/config/filter/listener/original_src/v3alpha/original_src.proto index 3c5fee9505a2..b934607dc2a9 100644 --- a/api/envoy/config/filter/listener/original_src/v3alpha/original_src.proto +++ b/api/envoy/config/filter/listener/original_src/v3alpha/original_src.proto @@ -15,7 +15,6 @@ import "validate/validate.proto"; // for the connection. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { - // Whether to bind the port to the one used in the original downstream connection. 
// [#not-implemented-warn:] bool bind_port = 1; diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index bfd59dd5e804..0a51432321c7 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; import "envoy/api/v2/core/address.proto"; + import "google/protobuf/duration.proto"; import "validate/validate.proto"; @@ -20,11 +21,11 @@ message ClientSSLAuth { // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. - string auth_api_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). The actual fetch time @@ -35,5 +36,5 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no // IP white list. 
- repeated envoy.api.v2.core.CidrRange ip_white_list = 4; + repeated api.v2.core.CidrRange ip_white_list = 4; } diff --git a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto index e9a27d151ea5..6cf616d96f67 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v3alpha/client_ssl_auth.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v3alpha"; import "envoy/api/v3alpha/core/address.proto"; + import "google/protobuf/duration.proto"; import "validate/validate.proto"; @@ -20,11 +21,11 @@ message ClientSSLAuth { // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. - string auth_api_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // The prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). The actual fetch time @@ -35,5 +36,5 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no // IP white list. 
- repeated envoy.api.v3alpha.core.CidrRange ip_white_list = 4; + repeated api.v3alpha.core.CidrRange ip_white_list = 4; } diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto index a27d7001cb86..b46bf9384469 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto @@ -15,16 +15,28 @@ import "validate/validate.proto"; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. + Hessian2 = 0; +} + // [#comment:next free field: 6] message DubboProxy { // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum.defined_only = true]; + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum.defined_only = true]; + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; // The route table for the connection manager is static and is specified in this property. repeated RouteConfiguration route_config = 4; @@ -36,22 +48,12 @@ message DubboProxy { repeated DubboFilter dubbo_filters = 5; } -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - Dubbo = 0; // the default protocol. -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - Hessian2 = 0; // the default serialization protocol. 
-} - // DubboFilter configures a Dubbo filter. // [#comment:next free field: 3] message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto index 02d86443a6f3..77565fb3a771 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto @@ -39,10 +39,10 @@ message RouteConfiguration { // [#comment:next free field: 3] message Route { // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message.required = true]; + RouteAction route = 2 [(validate.rules).message = {required: true}]; } // [#comment:next free field: 3] @@ -54,7 +54,7 @@ message RouteMatch { // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). - repeated envoy.api.v2.route.HeaderMatcher headers = 2; + repeated api.v2.route.HeaderMatcher headers = 2; } // [#comment:next free field: 3] @@ -69,15 +69,12 @@ message RouteAction { // request is routed to one of the upstream clusters based on weights // assigned to each cluster. // Currently ClusterWeight only supports the name and weight fields. 
- envoy.api.v2.route.WeightedCluster weighted_clusters = 2; + api.v2.route.WeightedCluster weighted_clusters = 2; } } // [#comment:next free field: 5] message MethodMatch { - // The name of the method. - envoy.type.matcher.StringMatcher name = 1; - // The parameter matching type. message ParameterMatchSpecifier { oneof parameter_match_specifier { @@ -95,10 +92,13 @@ message MethodMatch { // // * For range [-10,0), route will match for header value -1, but not for 0, // "somestring", 10.9, "-1somestring" - envoy.type.Int64Range range_match = 4; + type.Int64Range range_match = 4; } } + // The name of the method. + type.matcher.StringMatcher name = 1; + // Method parameter definition. // The key is the parameter index, starting from 0. // The value is the parameter matching type. diff --git a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto index 211c4cfed1cb..b48c91cac253 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/dubbo_proxy.proto @@ -15,16 +15,28 @@ import "validate/validate.proto"; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. + Hessian2 = 0; +} + // [#comment:next free field: 6] message DubboProxy { // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // Configure the protocol used. 
- ProtocolType protocol_type = 2 [(validate.rules).enum.defined_only = true]; + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum.defined_only = true]; + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; // The route table for the connection manager is static and is specified in this property. repeated RouteConfiguration route_config = 4; @@ -36,22 +48,12 @@ message DubboProxy { repeated DubboFilter dubbo_filters = 5; } -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - Dubbo = 0; // the default protocol. -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - Hessian2 = 0; // the default serialization protocol. -} - // DubboFilter configures a Dubbo filter. // [#comment:next free field: 3] message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. diff --git a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto index 6d357b8c138f..06569b88464d 100644 --- a/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto +++ b/api/envoy/config/filter/network/dubbo_proxy/v3alpha/route.proto @@ -39,10 +39,10 @@ message RouteConfiguration { // [#comment:next free field: 3] message Route { // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. 
- RouteAction route = 2 [(validate.rules).message.required = true]; + RouteAction route = 2 [(validate.rules).message = {required: true}]; } // [#comment:next free field: 3] @@ -54,7 +54,7 @@ message RouteMatch { // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). - repeated envoy.api.v3alpha.route.HeaderMatcher headers = 2; + repeated api.v3alpha.route.HeaderMatcher headers = 2; } // [#comment:next free field: 3] @@ -69,15 +69,12 @@ message RouteAction { // request is routed to one of the upstream clusters based on weights // assigned to each cluster. // Currently ClusterWeight only supports the name and weight fields. - envoy.api.v3alpha.route.WeightedCluster weighted_clusters = 2; + api.v3alpha.route.WeightedCluster weighted_clusters = 2; } } // [#comment:next free field: 5] message MethodMatch { - // The name of the method. - envoy.type.matcher.StringMatcher name = 1; - // The parameter matching type. message ParameterMatchSpecifier { oneof parameter_match_specifier { @@ -95,10 +92,13 @@ message MethodMatch { // // * For range [-10,0), route will match for header value -1, but not for 0, // "somestring", 10.9, "-1somestring" - envoy.type.Int64Range range_match = 4; + type.Int64Range range_match = 4; } } + // The name of the method. + type.matcher.StringMatcher name = 1; + // Method parameter definition. // The key is the parameter index, starting from 0. // The value is the parameter matching type. 
diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto index 8d0a6c6ca246..bc9ed5d51ca4 100644 --- a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -20,11 +20,11 @@ import "validate/validate.proto"; // A failed check will cause this filter to close the TCP connection. message ExtAuthz { // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. - envoy.api.v2.core.GrpcService grpc_service = 2; + api.v2.core.GrpcService grpc_service = 2; // The filter's behaviour in case the external authorization service does // not respond back. When it is set to true, Envoy will also allow traffic in case of diff --git a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto index c53b509fee79..574fd170da18 100644 --- a/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto +++ b/api/envoy/config/filter/network/ext_authz/v3alpha/ext_authz.proto @@ -20,11 +20,11 @@ import "validate/validate.proto"; // A failed check will cause this filter to close the TCP connection. message ExtAuthz { // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. - envoy.api.v3alpha.core.GrpcService grpc_service = 2; + api.v3alpha.core.GrpcService grpc_service = 2; // The filter's behaviour in case the external authorization service does // not respond back. 
When it is set to true, Envoy will also allow traffic in case of diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 05c351eb14d8..efdfb4be9392 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -26,7 +26,6 @@ import "validate/validate.proto"; // [#comment:next free field: 35] message HttpConnectionManager { enum CodecType { - // For every new connection, the connection manager will determine which // codec to use. This mode supports both ALPN for TLS listeners as well as // protocol inference for plaintext listeners. If ALPN data is available, it @@ -43,42 +42,44 @@ message HttpConnectionManager { HTTP2 = 2; } - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum.defined_only = true]; + enum ServerHeaderTransformation { + // Overwrite any Server header with the contents of server_name. + OVERWRITE = 0; - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; - oneof route_specifier { - option (validate.required) = true; + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. 
+ enum ForwardClientCertDetails { + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; - // The route table for the connection manager is static and is specified in this property. - envoy.api.v2.RouteConfiguration route_config = 4; + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + ALWAYS_FORWARD_ONLY = 4; + } message Tracing { enum OperationName { - // The HTTP listener is used for ingress/incoming requests. INGRESS = 0; @@ -92,7 +93,8 @@ message HttpConnectionManager { // // .. attention:: // This field has been deprecated in favor of `traffic_direction`. 
- OperationName operation_name = 1 [(validate.rules).enum.defined_only = true, deprecated = true]; + OperationName operation_name = 1 + [(validate.rules).enum = {defined_only: true}, deprecated = true]; // A list of header names used to create tags for the active span. The header name is used to // populate the tag name, and the header value is used to populate the tag value. The tag is @@ -105,14 +107,14 @@ message HttpConnectionManager { // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% - envoy.type.Percent client_sampling = 3; + type.Percent client_sampling = 3; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.Percent random_sampling = 4; + type.Percent random_sampling = 4; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random @@ -122,7 +124,7 @@ message HttpConnectionManager { // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.Percent overall_sampling = 5; + type.Percent overall_sampling = 5; // Whether to annotate spans with additional data. If true, spans will include logs for stream // events. @@ -134,37 +136,124 @@ message HttpConnectionManager { google.protobuf.UInt32Value max_path_tag_length = 7; } + message InternalAddressConfig { + // Whether unix socket addresses should be considered internal. + bool unix_sockets = 1; + } + + // [#comment:next free field: 7] + message SetCurrentClientCertDetails { + reserved 2; + + // Whether to forward the subject of the client cert. Defaults to false. 
+ google.protobuf.BoolValue subject = 1; + + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + bool chain = 6; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + } + + // The configuration for HTTP upgrades. + // For each upgrade type desired, an UpgradeConfig must be added. + // + // .. warning:: + // + // The current implementation of upgrade headers does not handle + // multi-valued upgrade headers. Support for multi-valued headers may be + // added in the future if needed. + // + // .. warning:: + // The current implementation of upgrade headers does not work with HTTP/2 + // upstreams. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. + string upgrade_type = 1; + + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + repeated HttpFilter filters = 2; + + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. 
+ google.protobuf.BoolValue enabled = 3; + } + + reserved 27; + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + api.v2.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } + + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. Tracing tracing = 7; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. 
- envoy.api.v2.core.Http1ProtocolOptions http_protocol_options = 8; + api.v2.core.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - envoy.api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; + api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. string server_name = 10; - enum ServerHeaderTransformation { - - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. @@ -172,7 +261,7 @@ message HttpConnectionManager { // The max configurable limit is 96 KiB, based on current implementation // constraints. google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 96]; + [(validate.rules).uint32 = {lte: 96 gt: 0}]; // The idle timeout for connections managed by the connection manager. The // idle timeout is defined as the period in which there are no active @@ -261,7 +350,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. 
- repeated envoy.config.filter.accesslog.v2.AccessLog access_log = 13; + repeated accesslog.v2.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -279,11 +368,6 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. uint32 xff_num_trusted_hops = 19; - message InternalAddressConfig { - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more @@ -316,61 +400,10 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - }; - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. 
ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum.defined_only = true]; - - // [#comment:next free field: 7] - message SetCurrentClientCertDetails { - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - reserved 2; // san deprecated by uri - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - }; + [(validate.rules).enum = {defined_only: true}]; // This field is valid only when :ref:`forward_client_cert_details // ` @@ -402,38 +435,8 @@ message HttpConnectionManager { // [#not-implemented-hide:] bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. 
- string upgrade_type = 1; - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - }; repeated UpgradeConfig upgrade_configs = 23; - reserved 27; - // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? This affects the upstream *:path* header // as well. For paths that fail this check, Envoy will respond with 400 to @@ -457,25 +460,22 @@ message HttpConnectionManager { message Rds { // Configuration source specifier for RDS. - envoy.api.v2.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // The name of the route configuration. This name will be passed to the RDS // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. - string route_config_name = 2 [(validate.rules).string.min_bytes = 1]; + string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; } // This message is used to work around the limitations with 'oneof' and repeated fields. message ScopedRouteConfigurationsList { - repeated envoy.api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated .min_items = 1]; + repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated = {min_items: 1}]; } message ScopedRoutes { - // The name assigned to the scoped routing configuration. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These // keys are matched against a set of :ref:`Key` // objects assembled from :ref:`ScopedRouteConfiguration` @@ -506,15 +506,6 @@ message ScopedRoutes { // // Each 'a=b' key-value pair constitutes an 'element' of the header field. message HeaderValueExtractor { - // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - // Specifies a header field's key value pair to match on. message KvElement { // The separator between key and value (e.g., '=' separates 'k=v;...'). @@ -522,12 +513,21 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string.min_bytes = 1]; + string separator = 1 [(validate.rules).string = {min_bytes: 1}]; // The key to match on. - string key = 2 [(validate.rules).string.min_bytes = 1]; + string key = 2 [(validate.rules).string = {min_bytes: 1}]; } + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. 
+ string element_separator = 2; + oneof extract_type { // Specifies the zero based index of the element to extract. // Note Envoy concatenates multiple values of the same header key into a comma separated @@ -548,16 +548,19 @@ message ScopedRoutes { } // The final scope key consists of the ordered union of these fragments. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated .min_items = 1]; + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message.required = true]; + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; // Configuration source specifier for RDS. // This config source is used to subscribe to RouteConfiguration resources specified in // ScopedRouteConfiguration messages. - envoy.api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message.required = true]; + api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; oneof config_specifier { option (validate.required) = true; @@ -580,14 +583,16 @@ message ScopedRoutes { message ScopedRds { // Configuration source specifier for scoped RDS. - envoy.api.v2.core.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message.required = true]; + api.v2.core.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message = {required: true}]; } message HttpFilter { + reserved 3; + // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. 
See the supported // filters for further documentation. @@ -596,6 +601,4 @@ message HttpFilter { google.protobuf.Any typed_config = 4; } - - reserved 3; } diff --git a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto index 24e56507cd8d..b5cebc774ddf 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v3alpha/http_connection_manager.proto @@ -26,7 +26,6 @@ import "validate/validate.proto"; // [#comment:next free field: 35] message HttpConnectionManager { enum CodecType { - // For every new connection, the connection manager will determine which // codec to use. This mode supports both ALPN for TLS listeners as well as // protocol inference for plaintext listeners. If ALPN data is available, it @@ -43,43 +42,44 @@ message HttpConnectionManager { HTTP2 = 2; } - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum.defined_only = true]; + enum ServerHeaderTransformation { + // Overwrite any Server header with the contents of server_name. + OVERWRITE = 0; - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + APPEND_IF_ABSENT = 1; - oneof route_specifier { - option (validate.required) = true; + // Pass through the value of the server header, and do not append a header + // if none is present. + PASS_THROUGH = 2; + } - // The connection manager’s route table will be dynamically loaded via the RDS API. 
- Rds rds = 3; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + enum ForwardClientCertDetails { + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; - // The route table for the connection manager is static and is specified in this property. - envoy.api.v3alpha.RouteConfiguration route_config = 4; + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + ALWAYS_FORWARD_ONLY = 4; + } message Tracing { - // [#comment:TODO(kyessenov): Align this field with listener traffic direction field.] enum OperationName { - // The HTTP listener is used for ingress/incoming requests. 
INGRESS = 0; @@ -87,8 +87,14 @@ message HttpConnectionManager { EGRESS = 1; } - // The span name will be derived from this field. - OperationName operation_name = 1 [(validate.rules).enum.defined_only = true]; + // The span name will be derived from this field. If + // :ref:`traffic_direction ` is + // specified on the parent listener, then it is used instead of this field. + // + // .. attention:: + // This field has been deprecated in favor of `traffic_direction`. + OperationName operation_name = 1 + [(validate.rules).enum = {defined_only: true}, deprecated = true]; // A list of header names used to create tags for the active span. The header name is used to // populate the tag name, and the header value is used to populate the tag value. The tag is @@ -101,14 +107,14 @@ message HttpConnectionManager { // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% - envoy.type.Percent client_sampling = 3; + type.Percent client_sampling = 3; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.Percent random_sampling = 4; + type.Percent random_sampling = 4; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random @@ -118,44 +124,136 @@ message HttpConnectionManager { // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% - envoy.type.Percent overall_sampling = 5; + type.Percent overall_sampling = 5; // Whether to annotate spans with additional data. If true, spans will include logs for stream // events. 
bool verbose = 6; + + // Maximum length of the request path to extract and include in the HttpUrl tag. Used to + // truncate lengthy request paths to meet the needs of a tracing backend. + // Default: 256 + google.protobuf.UInt32Value max_path_tag_length = 7; + } + + message InternalAddressConfig { + // Whether unix socket addresses should be considered internal. + bool unix_sockets = 1; + } + + // [#comment:next free field: 7] + message SetCurrentClientCertDetails { + reserved 2; + + // Whether to forward the subject of the client cert. Defaults to false. + google.protobuf.BoolValue subject = 1; + + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + bool chain = 6; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + } + + // The configuration for HTTP upgrades. + // For each upgrade type desired, an UpgradeConfig must be added. + // + // .. warning:: + // + // The current implementation of upgrade headers does not handle + // multi-valued upgrade headers. Support for multi-valued headers may be + // added in the future if needed. + // + // .. warning:: + // The current implementation of upgrade headers does not work with HTTP/2 + // upstreams. + message UpgradeConfig { + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. 
+ string upgrade_type = 1; + + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + repeated HttpFilter filters = 2; + + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + google.protobuf.BoolValue enabled = 3; + } + + reserved 27; + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + api.v3alpha.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; } + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. 
Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. Tracing tracing = 7; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - envoy.api.v3alpha.core.Http1ProtocolOptions http_protocol_options = 8; + api.v3alpha.core.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - envoy.api.v3alpha.core.Http2ProtocolOptions http2_protocol_options = 9; + api.v3alpha.core.Http2ProtocolOptions http2_protocol_options = 9; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. string server_name = 10; - enum ServerHeaderTransformation { - - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum.defined_only = true]; + [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. @@ -163,7 +261,7 @@ message HttpConnectionManager { // The max configurable limit is 96 KiB, based on current implementation // constraints. 
google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 96]; + [(validate.rules).uint32 = {lte: 96 gt: 0}]; // The idle timeout for connections managed by the connection manager. The // idle timeout is defined as the period in which there are no active @@ -252,7 +350,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. - repeated envoy.config.filter.accesslog.v3alpha.AccessLog access_log = 13; + repeated accesslog.v3alpha.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -270,11 +368,6 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. uint32 xff_num_trusted_hops = 19; - message InternalAddressConfig { - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more @@ -307,61 +400,10 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. 
- APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - }; - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum.defined_only = true]; - - // [#comment:next free field: 7] - message SetCurrentClientCertDetails { - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - reserved 2; // san deprecated by uri - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - }; + [(validate.rules).enum = {defined_only: true}]; // This field is valid only when :ref:`forward_client_cert_details // ` @@ -393,38 +435,8 @@ message HttpConnectionManager { // [#not-implemented-hide:] bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. 
warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. - string upgrade_type = 1; - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - }; repeated UpgradeConfig upgrade_configs = 23; - reserved 27; - // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? This affects the upstream *:path* header // as well. For paths that fail this check, Envoy will respond with 400 to @@ -448,25 +460,22 @@ message HttpConnectionManager { message Rds { // Configuration source specifier for RDS. - envoy.api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message.required = true]; + api.v3alpha.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // The name of the route configuration. This name will be passed to the RDS // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. 
- string route_config_name = 2 [(validate.rules).string.min_bytes = 1]; + string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; } // This message is used to work around the limitations with 'oneof' and repeated fields. message ScopedRouteConfigurationsList { - repeated envoy.api.v3alpha.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated .min_items = 1]; + repeated api.v3alpha.ScopedRouteConfiguration scoped_route_configurations = 1 + [(validate.rules).repeated = {min_items: 1}]; } message ScopedRoutes { - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string.min_bytes = 1]; - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These // keys are matched against a set of :ref:`Key` // objects assembled from :ref:`ScopedRouteConfiguration` @@ -497,15 +506,6 @@ message ScopedRoutes { // // Each 'a=b' key-value pair constitutes an 'element' of the header field. message HeaderValueExtractor { - // The name of the header field to extract the value from. - string name = 1 [(validate.rules).string.min_bytes = 1]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - // Specifies a header field's key value pair to match on. message KvElement { // The separator between key and value (e.g., '=' separates 'k=v;...'). @@ -513,12 +513,21 @@ message ScopedRoutes { // If an element contains no separator, the whole element is parsed as key and the // fragment value is an empty string. // If there are multiple values for a matched key, the first value is returned. 
- string separator = 1 [(validate.rules).string.min_bytes = 1]; + string separator = 1 [(validate.rules).string = {min_bytes: 1}]; // The key to match on. - string key = 2 [(validate.rules).string.min_bytes = 1]; + string key = 2 [(validate.rules).string = {min_bytes: 1}]; } + // The name of the header field to extract the value from. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. + string element_separator = 2; + oneof extract_type { // Specifies the zero based index of the element to extract. // Note Envoy concatenates multiple values of the same header key into a comma separated @@ -539,17 +548,19 @@ message ScopedRoutes { } // The final scope key consists of the ordered union of these fragments. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated .min_items = 1]; + repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } + // The name assigned to the scoped routing configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message.required = true]; + ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; // Configuration source specifier for RDS. // This config source is used to subscribe to RouteConfiguration resources specified in // ScopedRouteConfiguration messages. 
- envoy.api.v3alpha.core.ConfigSource rds_config_source = 3 - [(validate.rules).message.required = true]; + api.v3alpha.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; oneof config_specifier { option (validate.required) = true; @@ -572,14 +583,16 @@ message ScopedRoutes { message ScopedRds { // Configuration source specifier for scoped RDS. - envoy.api.v3alpha.core.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message.required = true]; + api.v3alpha.core.ConfigSource scoped_rds_config_source = 1 + [(validate.rules).message = {required: true}]; } message HttpFilter { + reserved 3; + // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. @@ -588,6 +601,4 @@ message HttpFilter { google.protobuf.Any typed_config = 4; } - - reserved 3; } diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto index 46ef44c96b94..724c8a3b4c40 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -16,7 +16,7 @@ import "validate/validate.proto"; message MongoProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The optional path to use for writing Mongo access logs. If not access log // path is specified no access logs will be written. Note that access log is @@ -27,7 +27,7 @@ message MongoProxy { // applied to the following MongoDB operations: Query, Insert, GetMore, // and KillCursors. 
Once an active delay is in progress, all incoming // data up until the timer event fires will be a part of the delay. - envoy.config.filter.fault.v2.FaultDelay delay = 3; + fault.v2.FaultDelay delay = 3; // Flag to specify whether :ref:`dynamic metadata // ` should be emitted. Defaults to false. diff --git a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto index 780483ccb4c8..9ca7a7a3a5e6 100644 --- a/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto +++ b/api/envoy/config/filter/network/mongo_proxy/v3alpha/mongo_proxy.proto @@ -16,7 +16,7 @@ import "validate/validate.proto"; message MongoProxy { // The human readable prefix to use when emitting :ref:`statistics // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The optional path to use for writing Mongo access logs. If not access log // path is specified no access logs will be written. Note that access log is @@ -27,7 +27,7 @@ message MongoProxy { // applied to the following MongoDB operations: Query, Insert, GetMore, // and KillCursors. Once an active delay is in progress, all incoming // data up until the timer event fires will be a part of the delay. - envoy.config.filter.fault.v3alpha.FaultDelay delay = 3; + fault.v3alpha.FaultDelay delay = 3; // Flag to specify whether :ref:`dynamic metadata // ` should be emitted. Defaults to false. diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto index f5d484fac1ba..9778f02bc342 100644 --- a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -18,14 +18,14 @@ import "validate/validate.proto"; message RateLimit { // The prefix to use when emitting :ref:`statistics `. 
- string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string.min_bytes = 1]; + string domain = 2 [(validate.rules).string = {min_bytes: 1}]; // The rate limit descriptor list to use in the rate limit service request. - repeated envoy.api.v2.ratelimit.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated .min_items = 1]; + repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3 + [(validate.rules).repeated = {min_items: 1}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. @@ -40,6 +40,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message.required = true]; + ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto index 522fe145a7f7..7ba60ddcd1b8 100644 --- a/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/network/rate_limit/v3alpha/rate_limit.proto @@ -18,14 +18,14 @@ import "validate/validate.proto"; message RateLimit { // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The rate limit domain to use in the rate limit service request. 
- string domain = 2 [(validate.rules).string.min_bytes = 1]; + string domain = 2 [(validate.rules).string = {min_bytes: 1}]; // The rate limit descriptor list to use in the rate limit service request. - repeated envoy.api.v3alpha.ratelimit.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated .min_items = 1]; + repeated api.v3alpha.ratelimit.RateLimitDescriptor descriptors = 3 + [(validate.rules).repeated = {min_items: 1}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. @@ -40,6 +40,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. - envoy.config.ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message.required = true]; + ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 6 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/network/rbac/v2/rbac.proto b/api/envoy/config/filter/network/rbac/v2/rbac.proto index c192b888e559..ea24eb50f431 100644 --- a/api/envoy/config/filter/network/rbac/v2/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v2/rbac.proto @@ -18,6 +18,17 @@ import "validate/validate.proto"; // Header should not be used in rules/shadow_rules in RBAC network filter as // this information is only available in :ref:`RBAC http filter `. message RBAC { + enum EnforcementType { + // Apply RBAC policies when the first byte of data arrives on the connection. + ONE_TIME_ON_FIRST_BYTE = 0; + + // Continuously apply RBAC policies as data arrives. Use this mode when + // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, + // etc. when the protocol decoders emit dynamic metadata such as the + // resources being accessed and the operations on the resources. + CONTINUOUS = 1; + } + // Specify the RBAC rules to be applied globally. 
// If absent, no enforcing RBAC policy will be applied. config.rbac.v2.RBAC rules = 1; @@ -28,18 +39,7 @@ message RBAC { config.rbac.v2.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string.min_bytes = 1]; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. - CONTINUOUS = 1; - }; + string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in diff --git a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto index c1dcd6568262..b8ec5828baec 100644 --- a/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/filter/network/rbac/v3alpha/rbac.proto @@ -18,6 +18,17 @@ import "validate/validate.proto"; // Header should not be used in rules/shadow_rules in RBAC network filter as // this information is only available in :ref:`RBAC http filter `. message RBAC { + enum EnforcementType { + // Apply RBAC policies when the first byte of data arrives on the connection. + ONE_TIME_ON_FIRST_BYTE = 0; + + // Continuously apply RBAC policies as data arrives. Use this mode when + // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, + // etc. when the protocol decoders emit dynamic metadata such as the + // resources being accessed and the operations on the resources. + CONTINUOUS = 1; + } + // Specify the RBAC rules to be applied globally. // If absent, no enforcing RBAC policy will be applied. 
config.rbac.v3alpha.RBAC rules = 1; @@ -28,18 +39,7 @@ message RBAC { config.rbac.v3alpha.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string.min_bytes = 1]; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. - CONTINUOUS = 1; - }; + string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index 78c56bb2efe6..6a9fa5c2ad53 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -17,29 +17,39 @@ import "validate/validate.proto"; // Redis Proxy :ref:`configuration overview `. message RedisProxy { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // Name of cluster from cluster manager. See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string cluster = 2 [deprecated = true]; - // Redis connection pool settings. message ConnPoolSettings { + // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently + // supported for Redis Cluster. 
All ReadPolicy settings except MASTER may return stale data + // because replication is asynchronous and requires some delay. You need to ensure that your + // application can tolerate stale data. + enum ReadPolicy { + // Default mode. Read from the current master node. + MASTER = 0; + + // Read from the master, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1; + + // Read from replica nodes. If multiple replica nodes are present within a shard, a random + // node is selected. Healthy nodes have precedent over unhealthy nodes. + REPLICA = 2; + + // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not + // present or unhealthy), read from the master. + PREFER_REPLICA = 3; + + // Read from any node of the cluster. A random node is selected among the master and replicas, + // healthy nodes have precedent over unhealthy nodes. + ANY = 4; + } + // Per-operation timeout in milliseconds. The timer starts when the first // command of a pipeline is written to the backend connection. Each response received from Redis // resets the timer since it signifies that the next command is being processed by the backend. // The only exception to this behavior is when a connection to a backend is not yet established. // In that case, the connect timeout on the cluster will govern the timeout until the connection // is ready. - google.protobuf.Duration op_timeout = 1 [(validate.rules).duration.required = true]; + google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be // forwarded to the same upstream. The hash key used for determining the upstream in a @@ -92,49 +102,12 @@ message RedisProxy { // count. bool enable_command_stats = 8; - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. 
All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. - enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. - PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } - // Read policy. The default is to read from the master. - ReadPolicy read_policy = 7 [(validate.rules).enum.defined_only = true]; + ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. - bool latency_in_micros = 4; - message PrefixRoutes { message Route { - // String prefix that must match the beginning of the keys. Envoy will always favor the - // longest match. - string prefix = 1; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string.min_bytes = 1]; - // The router is capable of shadowing traffic from one cluster to another. 
The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are @@ -142,7 +115,7 @@ message RedisProxy { message RequestMirrorPolicy { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. @@ -156,13 +129,23 @@ message RedisProxy { // is an integral percentage out of 100. For instance, a runtime key lookup returning the // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is // HUNDRED. - envoy.api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; + api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; // Set this to TRUE to only mirror write commands, this is effectively replicating the // writes in a "fire and forget" manner. bool exclude_read_commands = 3; } + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; + // Indicates that the route has a request mirroring policy. repeated RequestMirrorPolicy request_mirror_policy = 4; } @@ -187,6 +170,27 @@ message RedisProxy { Route catch_all_route = 4; } + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Name of cluster from cluster manager. 
See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing cluster. + // + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch_all + // route` + // instead. + string cluster = 2 [deprecated = true]; + + // Network settings for the connection pool to the upstream clusters. + ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; + + // Indicates that latency stat should be computed in microseconds. By default it is computed in + // milliseconds. + bool latency_in_micros = 4; + // List of **unique** prefixes used to separate keys from different workloads to different // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all // cluster can be used to forward commands when there is no match. Time complexity of the @@ -224,7 +228,7 @@ message RedisProxy { // password is set, then a "NOAUTH Authentication required." error response will be sent to the // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. - envoy.api.v2.core.DataSource downstream_auth_password = 6; + api.v2.core.DataSource downstream_auth_password = 6; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in @@ -233,5 +237,5 @@ message RedisProxy { message RedisProtocolOptions { // Upstream server password as defined by the `requirepass directive // `_ in the server's configuration file. 
- envoy.api.v2.core.DataSource auth_password = 1; + api.v2.core.DataSource auth_password = 1; } diff --git a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto index bb9307cb327d..f96036bc311e 100644 --- a/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v3alpha/redis_proxy.proto @@ -17,29 +17,39 @@ import "validate/validate.proto"; // Redis Proxy :ref:`configuration overview `. message RedisProxy { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // Name of cluster from cluster manager. See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string cluster = 2 [deprecated = true]; - // Redis connection pool settings. message ConnPoolSettings { + // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently + // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data + // because replication is asynchronous and requires some delay. You need to ensure that your + // application can tolerate stale data. + enum ReadPolicy { + // Default mode. Read from the current master node. + MASTER = 0; + + // Read from the master, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1; + + // Read from replica nodes. If multiple replica nodes are present within a shard, a random + // node is selected. Healthy nodes have precedent over unhealthy nodes. + REPLICA = 2; + + // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not + // present or unhealthy), read from the master. + PREFER_REPLICA = 3; + + // Read from any node of the cluster. 
A random node is selected among the master and replicas, + // healthy nodes have precedent over unhealthy nodes. + ANY = 4; + } + // Per-operation timeout in milliseconds. The timer starts when the first // command of a pipeline is written to the backend connection. Each response received from Redis // resets the timer since it signifies that the next command is being processed by the backend. // The only exception to this behavior is when a connection to a backend is not yet established. // In that case, the connect timeout on the cluster will govern the timeout until the connection // is ready. - google.protobuf.Duration op_timeout = 1 [(validate.rules).duration.required = true]; + google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be // forwarded to the same upstream. The hash key used for determining the upstream in a @@ -88,49 +98,16 @@ message RedisProxy { // downstream unchanged. This limit defaults to 100. google.protobuf.UInt32Value max_upstream_unknown_connections = 6; - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. - enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. 
- PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } + // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate + // count. + bool enable_command_stats = 8; // Read policy. The default is to read from the master. - ReadPolicy read_policy = 7 [(validate.rules).enum.defined_only = true]; + ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. - bool latency_in_micros = 4; - message PrefixRoutes { message Route { - // String prefix that must match the beginning of the keys. Envoy will always favor the - // longest match. - string prefix = 1; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string.min_bytes = 1]; - // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are @@ -138,7 +115,7 @@ message RedisProxy { message RequestMirrorPolicy { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. 
@@ -152,13 +129,23 @@ message RedisProxy { // is an integral percentage out of 100. For instance, a runtime key lookup returning the // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is // HUNDRED. - envoy.api.v3alpha.core.RuntimeFractionalPercent runtime_fraction = 2; + api.v3alpha.core.RuntimeFractionalPercent runtime_fraction = 2; // Set this to TRUE to only mirror write commands, this is effectively replicating the // writes in a "fire and forget" manner. bool exclude_read_commands = 3; } + // String prefix that must match the beginning of the keys. Envoy will always favor the + // longest match. + string prefix = 1; + + // Indicates if the prefix needs to be removed from the key when forwarded. + bool remove_prefix = 2; + + // Upstream cluster to forward the command to. + string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; + // Indicates that the route has a request mirroring policy. repeated RequestMirrorPolicy request_mirror_policy = 4; } @@ -183,6 +170,27 @@ message RedisProxy { Route catch_all_route = 4; } + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Name of cluster from cluster manager. See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing cluster. + // + // .. attention:: + // + // This field is deprecated. Use a :ref:`catch_all + // route` + // instead. + string cluster = 2 [deprecated = true]; + + // Network settings for the connection pool to the upstream clusters. + ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; + + // Indicates that latency stat should be computed in microseconds. By default it is computed in + // milliseconds. + bool latency_in_micros = 4; + // List of **unique** prefixes used to separate keys from different workloads to different // clusters. 
Envoy will always favor the longest match first in case of overlap. A catch-all // cluster can be used to forward commands when there is no match. Time complexity of the @@ -220,7 +228,7 @@ message RedisProxy { // password is set, then a "NOAUTH Authentication required." error response will be sent to the // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. - envoy.api.v3alpha.core.DataSource downstream_auth_password = 6; + api.v3alpha.core.DataSource downstream_auth_password = 6; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in @@ -229,5 +237,5 @@ message RedisProxy { message RedisProtocolOptions { // Upstream server password as defined by the `requirepass directive // `_ in the server's configuration file. - envoy.api.v3alpha.core.DataSource auth_password = 1; + api.v3alpha.core.DataSource auth_password = 1; } diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index 8e4453dd9f7d..0ac01842159f 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -6,9 +6,9 @@ option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -19,49 +19,6 @@ import "validate/validate.proto"; // TCP Proxy :ref:`configuration overview `. message TcpProxy { - // The prefix to use when emitting :ref:`statistics - // `. 
- string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - // - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - envoy.api.v2.core.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, connections will never be closed - // by the TCP proxy due to being idle. - google.protobuf.Duration idle_timeout = 8 [(validate.rules).duration.gt = {}]; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated envoy.config.filter.accesslog.v2.AccessLog access_log = 5; - // [#not-implemented-hide:] Deprecated. // TCP Proxy filter configuration using V1 format. 
message DeprecatedV1 { @@ -75,7 +32,7 @@ message TcpProxy { message TCPRoute { // The cluster to connect to when a the downstream network connection // matches the specified criteria. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // An optional list of IP address subnets in the form // “ip_address/xx”. The criteria is satisfied if the destination IP @@ -85,7 +42,7 @@ message TcpProxy { // address of the downstream connection might be different from the // addresses on which the proxy is listening if the connection has been // redirected. - repeated envoy.api.v2.core.CidrRange destination_ip_list = 2; + repeated api.v2.core.CidrRange destination_ip_list = 2; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the destination port of the @@ -101,7 +58,7 @@ message TcpProxy { // of the downstream connection is contained in at least one of the // specified subnets. If the parameter is not specified or the list is // empty, the source IP address is ignored. - repeated envoy.api.v2.core.CidrRange source_ip_list = 4; + repeated api.v2.core.CidrRange source_ip_list = 4; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the source port of the @@ -113,31 +70,74 @@ message TcpProxy { // The route table for the filter. All filter instances must have a route // table, even if it is empty. - repeated TCPRoute routes = 1 [(validate.rules).repeated .min_items = 1]; + repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; } - // [#not-implemented-hide:] Deprecated. - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. 
- google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32.gte = 1]; - // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32.gte = 1]; + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } + + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + // + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + api.v2.core.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. 
The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, connections will never be closed + // by the TCP proxy due to being idle. + google.protobuf.Duration idle_timeout = 8 [(validate.rules).duration = {gt {}}]; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. + repeated accesslog.v2.AccessLog access_log = 5; + + // [#not-implemented-hide:] Deprecated. + DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. 
+ google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; } diff --git a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto index ff74cc06cd75..c8fe7962d43d 100644 --- a/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto +++ b/api/envoy/config/filter/network/tcp_proxy/v3alpha/tcp_proxy.proto @@ -6,9 +6,9 @@ option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v3alpha"; -import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/core/base.proto"; +import "envoy/config/filter/accesslog/v3alpha/accesslog.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -19,49 +19,6 @@ import "validate/validate.proto"; // TCP Proxy :ref:`configuration overview `. message TcpProxy { - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - // - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - envoy.api.v3alpha.core.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. 
The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, connections will never be closed - // by the TCP proxy due to being idle. - google.protobuf.Duration idle_timeout = 8 [(validate.rules).duration.gt = {}]; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated envoy.config.filter.accesslog.v3alpha.AccessLog access_log = 5; - // [#not-implemented-hide:] Deprecated. // TCP Proxy filter configuration using V1 format. message DeprecatedV1 { @@ -75,7 +32,7 @@ message TcpProxy { message TCPRoute { // The cluster to connect to when a the downstream network connection // matches the specified criteria. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // An optional list of IP address subnets in the form // “ip_address/xx”. The criteria is satisfied if the destination IP @@ -85,7 +42,7 @@ message TcpProxy { // address of the downstream connection might be different from the // addresses on which the proxy is listening if the connection has been // redirected. 
- repeated envoy.api.v3alpha.core.CidrRange destination_ip_list = 2; + repeated api.v3alpha.core.CidrRange destination_ip_list = 2; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the destination port of the @@ -101,7 +58,7 @@ message TcpProxy { // of the downstream connection is contained in at least one of the // specified subnets. If the parameter is not specified or the list is // empty, the source IP address is ignored. - repeated envoy.api.v3alpha.core.CidrRange source_ip_list = 4; + repeated api.v3alpha.core.CidrRange source_ip_list = 4; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the source port of the @@ -113,31 +70,74 @@ message TcpProxy { // The route table for the filter. All filter instances must have a route // table, even if it is empty. - repeated TCPRoute routes = 1 [(validate.rules).repeated .min_items = 1]; + repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; } - // [#not-implemented-hide:] Deprecated. - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32.gte = 1]; - // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. 
The sum of weights across all entries in the // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32.gte = 1]; + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } + + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + // + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + api.v3alpha.core.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, connections will never be closed + // by the TCP proxy due to being idle. + google.protobuf.Duration idle_timeout = 8 [(validate.rules).duration = {gt {}}]; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. 
The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. + repeated accesslog.v3alpha.AccessLog access_log = 5; + + // [#not-implemented-hide:] Deprecated. + DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; } diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto index 33d120047159..0f67a20c2180 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto @@ -29,10 +29,10 @@ message RouteConfiguration { // [#comment:next free field: 3] message Route { // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message.required = true]; + RouteAction route = 2 [(validate.rules).message = {required: true}]; } // [#comment:next free field: 5] @@ -69,7 +69,7 @@ message RouteMatch { // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). Note that this only applies for Thrift transports and/or // protocols that support headers. 
- repeated envoy.api.v2.route.HeaderMatcher headers = 4; + repeated api.v2.route.HeaderMatcher headers = 4; } // [#comment:next free field: 5] @@ -79,7 +79,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -93,12 +93,12 @@ message RouteAction { // `, // with values there taking precedence. Keys and values should be provided under the "envoy.lb" // metadata key. - envoy.api.v2.core.Metadata metadata_match = 3; + api.v2.core.Metadata metadata_match = 3; // Specifies a set of rate limit configurations that could be applied to the route. // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders // action with the header name ":method-name". - repeated envoy.api.v2.route.RateLimit rate_limits = 4; + repeated api.v2.route.RateLimit rate_limits = 4; } // Allows for specification of multiple upstream clusters along with weights that indicate the @@ -107,12 +107,12 @@ message RouteAction { message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints in // the upstream cluster with metadata matching what is set in this field, combined with what's @@ -120,9 +120,9 @@ message WeightedCluster { // `, // will be considered. Values here will take precedence. Keys and values should be provided // under the "envoy.lb" metadata key. - envoy.api.v2.core.Metadata metadata_match = 3; + api.v2.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto index 823a1747527b..e925cfe697d8 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto @@ -16,32 +16,8 @@ import "validate/validate.proto"; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. -// [#comment:next free field: 6] -message ThriftProxy { - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum.defined_only = true]; - - // Supplies the type of protocol that the Thrift proxy should use. Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum.defined_only = true]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. 
For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - repeated ThriftFilter thrift_filters = 5; -} - // Thrift transport types supported by Envoy. enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. // For upstream connections, the Thrift proxy will use same transport as the downstream // connection. @@ -59,7 +35,6 @@ enum TransportType { // Thrift Protocol types supported by Envoy. enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol // detection. For upstream connections, the Thrift proxy will use the same protocol as the @@ -79,6 +54,29 @@ enum ProtocolType { TWITTER = 4; } +// [#comment:next free field: 6] +message ThriftProxy { + // Supplies the type of transport that the Thrift proxy should use. Defaults to + // :ref:`AUTO_TRANSPORT`. + TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. 
+ repeated ThriftFilter thrift_filters = 5; +} + // ThriftFilter configures a Thrift filter. // [#comment:next free field: 3] message ThriftFilter { @@ -88,7 +86,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. @@ -108,11 +106,11 @@ message ThriftProtocolOptions { // Selecting // :ref:`AUTO_TRANSPORT`, // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum.defined_only = true]; + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_PROTOCOL`, // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum.defined_only = true]; + ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto b/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto index 3a351b1449d0..2dc74be0952b 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v3alpha/route.proto @@ -29,10 +29,10 @@ message RouteConfiguration { // [#comment:next free field: 3] message Route { // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message.required = true]; + RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. 
- RouteAction route = 2 [(validate.rules).message.required = true]; + RouteAction route = 2 [(validate.rules).message = {required: true}]; } // [#comment:next free field: 5] @@ -69,7 +69,7 @@ message RouteMatch { // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). Note that this only applies for Thrift transports and/or // protocols that support headers. - repeated envoy.api.v3alpha.route.HeaderMatcher headers = 4; + repeated api.v3alpha.route.HeaderMatcher headers = 4; } // [#comment:next free field: 5] @@ -79,7 +79,7 @@ message RouteAction { // Indicates a single upstream cluster to which the request should be routed // to. - string cluster = 1 [(validate.rules).string.min_bytes = 1]; + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights @@ -93,12 +93,12 @@ message RouteAction { // `, // with values there taking precedence. Keys and values should be provided under the "envoy.lb" // metadata key. - envoy.api.v3alpha.core.Metadata metadata_match = 3; + api.v3alpha.core.Metadata metadata_match = 3; // Specifies a set of rate limit configurations that could be applied to the route. // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders // action with the header name ":method-name". - repeated envoy.api.v3alpha.route.RateLimit rate_limits = 4; + repeated api.v3alpha.route.RateLimit rate_limits = 4; } // Allows for specification of multiple upstream clusters along with weights that indicate the @@ -107,12 +107,12 @@ message RouteAction { message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32.gte = 1]; + google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field, combined with what's @@ -120,9 +120,9 @@ message WeightedCluster { // `, // will be considered. Values here will take precedence. Keys and values should be provided // under the "envoy.lb" metadata key. - envoy.api.v3alpha.core.Metadata metadata_match = 3; + api.v3alpha.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto index 83f44bbf720b..fc521b6de83c 100644 --- a/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto +++ b/api/envoy/config/filter/network/thrift_proxy/v3alpha/thrift_proxy.proto @@ -16,32 +16,8 @@ import "validate/validate.proto"; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. -// [#comment:next free field: 6] -message ThriftProxy { - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum.defined_only = true]; - - // Supplies the type of protocol that the Thrift proxy should use. 
Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum.defined_only = true]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - repeated ThriftFilter thrift_filters = 5; -} - // Thrift transport types supported by Envoy. enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. // For upstream connections, the Thrift proxy will use same transport as the downstream // connection. @@ -59,7 +35,6 @@ enum TransportType { // Thrift Protocol types supported by Envoy. enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol // detection. For upstream connections, the Thrift proxy will use the same protocol as the @@ -79,6 +54,29 @@ enum ProtocolType { TWITTER = 4; } +// [#comment:next free field: 6] +message ThriftProxy { + // Supplies the type of transport that the Thrift proxy should use. Defaults to + // :ref:`AUTO_TRANSPORT`. + TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. 
+ string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. + repeated ThriftFilter thrift_filters = 5; +} + // ThriftFilter configures a Thrift filter. // [#comment:next free field: 3] message ThriftFilter { @@ -88,7 +86,7 @@ message ThriftFilter { // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. @@ -108,11 +106,11 @@ message ThriftProtocolOptions { // Selecting // :ref:`AUTO_TRANSPORT`, // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum.defined_only = true]; + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_PROTOCOL`, // which is the default, causes the proxy to use the same protocol as the downstream connection. 
- ProtocolType protocol = 2 [(validate.rules).enum.defined_only = true]; + ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto index ff2463b26c6c..5cc681ff6010 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto @@ -18,7 +18,7 @@ import "validate/validate.proto"; // [#comment:next free field: 5] message RateLimit { // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string.min_bytes = 1]; + string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the @@ -29,7 +29,7 @@ message RateLimit { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. @@ -44,6 +44,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. 
- envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message.required = true]; + ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto b/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto index 017d9546a9a3..abbe8caad7ab 100644 --- a/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto +++ b/api/envoy/config/filter/thrift/rate_limit/v3alpha/rate_limit.proto @@ -18,7 +18,7 @@ import "validate/validate.proto"; // [#comment:next free field: 5] message RateLimit { // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string.min_bytes = 1]; + string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the @@ -29,7 +29,7 @@ message RateLimit { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. @@ -44,6 +44,6 @@ message RateLimit { // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. 
- envoy.config.ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message.required = true]; + ratelimit.v3alpha.RateLimitServiceConfig rate_limit_service = 5 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto index e7a7bf94cce6..154572901a7c 100644 --- a/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v2alpha/aws_iam.proto @@ -1,23 +1,23 @@ syntax = "proto3"; -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin - package envoy.config.grpc_credential.v2alpha; option java_outer_classname = "AwsIamProto"; -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; import "validate/validate.proto"; +// [#protodoc-title: Grpc Credentials AWS IAM] +// Configuration for AWS IAM Grpc Credentials Plugin + message AwsIamConfig { // The `service namespace // `_ // of the Grpc endpoint. // // Example: appmesh - string service_name = 1 [(validate.rules).string.min_bytes = 1]; + string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The `region `_ hosting the Grpc // endpoint. 
If unspecified, the extension will use the value in the ``AWS_REGION`` environment diff --git a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto index 1746492fe261..cd9f27d71e45 100644 --- a/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto @@ -1,8 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin - package envoy.config.grpc_credential.v2alpha; option java_outer_classname = "FileBasedMetadataProto"; @@ -11,11 +8,13 @@ option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; import "envoy/api/v2/core/base.proto"; -message FileBasedMetadataConfig { +// [#protodoc-title: Grpc Credentials File Based Metadata] +// Configuration for File Based Metadata Grpc Credentials Plugin +message FileBasedMetadataConfig { // Location or inline data of secret to use for authentication of the Google gRPC connection // this secret will be attached to a header of the gRPC connection - envoy.api.v2.core.DataSource secret_data = 1; + api.v2.core.DataSource secret_data = 1; // Metadata header key to use for sending the secret data // if no header key is set, "authorization" header will be used diff --git a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto index 29c9cf140a00..fe100f9ded91 100644 --- a/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto +++ b/api/envoy/config/grpc_credential/v3alpha/aws_iam.proto @@ -1,23 +1,23 @@ syntax = "proto3"; -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin - package envoy.config.grpc_credential.v3alpha; option java_outer_classname = "AwsIamProto"; -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; option 
java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; import "validate/validate.proto"; +// [#protodoc-title: Grpc Credentials AWS IAM] +// Configuration for AWS IAM Grpc Credentials Plugin + message AwsIamConfig { // The `service namespace // `_ // of the Grpc endpoint. // // Example: appmesh - string service_name = 1 [(validate.rules).string.min_bytes = 1]; + string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The `region `_ hosting the Grpc // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment diff --git a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto index 9bab390cc833..dc8156a57b88 100644 --- a/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto +++ b/api/envoy/config/grpc_credential/v3alpha/file_based_metadata.proto @@ -1,8 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin - package envoy.config.grpc_credential.v3alpha; option java_outer_classname = "FileBasedMetadataProto"; @@ -11,11 +8,13 @@ option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3alpha"; import "envoy/api/v3alpha/core/base.proto"; -message FileBasedMetadataConfig { +// [#protodoc-title: Grpc Credentials File Based Metadata] +// Configuration for File Based Metadata Grpc Credentials Plugin +message FileBasedMetadataConfig { // Location or inline data of secret to use for authentication of the Google gRPC connection // this secret will be attached to a header of the gRPC connection - envoy.api.v3alpha.core.DataSource secret_data = 1; + api.v3alpha.core.DataSource secret_data = 1; // Metadata header key to use for sending the secret data // if no header key is set, "authorization" header will be used diff --git a/api/envoy/config/listener/v2/BUILD 
b/api/envoy/config/listener/v2/BUILD new file mode 100644 index 000000000000..031cd7ce37cd --- /dev/null +++ b/api/envoy/config/listener/v2/BUILD @@ -0,0 +1,18 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +package_group( + name = "friends", + packages = [ + "//envoy/api/v2", + ], +) + +api_proto_package() + +api_proto_library_internal( + name = "api_listener", + srcs = ["api_listener.proto"], + visibility = [":friends"], +) diff --git a/api/envoy/config/listener/v2/api_listener.proto b/api/envoy/config/listener/v2/api_listener.proto new file mode 100644 index 000000000000..0c2253596e43 --- /dev/null +++ b/api/envoy/config/listener/v2/api_listener.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.config.listener.v2; + +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.listener.v2"; + +import "google/protobuf/any.proto"; + +// [#not-implemented-hide:] +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] 
+ google.protobuf.Any api_listener = 1; +} diff --git a/api/envoy/config/listener/v3alpha/BUILD b/api/envoy/config/listener/v3alpha/BUILD new file mode 100644 index 000000000000..cd728ed17ad1 --- /dev/null +++ b/api/envoy/config/listener/v3alpha/BUILD @@ -0,0 +1,18 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library_internal", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +package_group( + name = "friends", + packages = [ + "//envoy/api/v3alpha", + ], +) + +api_proto_package() + +api_proto_library_internal( + name = "api_listener", + srcs = ["api_listener.proto"], + visibility = [":friends"], +) diff --git a/api/envoy/config/listener/v3alpha/api_listener.proto b/api/envoy/config/listener/v3alpha/api_listener.proto new file mode 100644 index 000000000000..ccf0f3901721 --- /dev/null +++ b/api/envoy/config/listener/v3alpha/api_listener.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.config.listener.v3alpha; + +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.listener.v3alpha"; + +import "google/protobuf/any.proto"; + +// [#not-implemented-hide:] +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.config.filter.network.http_connection_manager.v3alpha.HttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. 
We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] + google.protobuf.Any api_listener = 1; +} diff --git a/api/envoy/config/metrics/v2/metrics_service.proto b/api/envoy/config/metrics/v2/metrics_service.proto index 208a5c2076ed..da53e5a52fdc 100644 --- a/api/envoy/config/metrics/v2/metrics_service.proto +++ b/api/envoy/config/metrics/v2/metrics_service.proto @@ -1,7 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Metrics service] - package envoy.config.metrics.v2; option java_outer_classname = "MetricsServiceProto"; @@ -12,10 +10,12 @@ import "envoy/api/v2/core/grpc_service.proto"; import "validate/validate.proto"; +// [#protodoc-title: Metrics service] + // Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. - envoy.api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto index fea8b9b0f878..e82f90484cb2 100644 --- a/api/envoy/config/metrics/v2/stats.proto +++ b/api/envoy/config/metrics/v2/stats.proto @@ -1,6 +1,3 @@ -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - syntax = "proto3"; package envoy.config.metrics.v2; @@ -18,6 +15,9 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + // Configuration for pluggable stats sinks. message StatsSink { // The name of the stats sink to instantiate. 
The name must match a supported @@ -149,12 +149,12 @@ message StatsMatcher { // Exclusive match. All stats are enabled except for those matching one of the supplied // StringMatcher protos. - envoy.type.matcher.ListStringMatcher exclusion_list = 2; + type.matcher.ListStringMatcher exclusion_list = 2; // Inclusive match. No stats are enabled except for those matching one of the supplied // StringMatcher protos. - envoy.type.matcher.ListStringMatcher inclusion_list = 3; - }; + type.matcher.ListStringMatcher inclusion_list = 3; + } } // Designates a tag name and value pair. The value may be either a fixed value @@ -231,7 +231,7 @@ message TagSpecifier { // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag // ``envoy.http_conn_manager_prefix`` will be added with the tag value // ``connection_manager_1``. - string regex = 2 [(validate.rules).string.max_bytes = 1024]; + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; // Specifies a fixed tag value for the ``tag_name``. string fixed_value = 3; @@ -247,13 +247,14 @@ message StatsdSink { // The UDP address of a running `statsd `_ // compliant listener. If specified, statistics will be flushed to this // address. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. string tcp_cluster_name = 2; } + // Optional custom prefix for StatsdSink. If // specified, this will override the default prefix. // For example: @@ -289,16 +290,16 @@ message StatsdSink { // `. // [#comment:next free field: 3] message DogStatsdSink { + reserved 2; + oneof dog_statsd_specifier { option (validate.required) = true; // The UDP address of a running DogStatsD compliant listener. If specified, // statistics will be flushed to this address. 
- envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; } - reserved 2; - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; diff --git a/api/envoy/config/metrics/v3alpha/metrics_service.proto b/api/envoy/config/metrics/v3alpha/metrics_service.proto index 392ceb8d6fed..83124c081771 100644 --- a/api/envoy/config/metrics/v3alpha/metrics_service.proto +++ b/api/envoy/config/metrics/v3alpha/metrics_service.proto @@ -1,7 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Metrics service] - package envoy.config.metrics.v3alpha; option java_outer_classname = "MetricsServiceProto"; @@ -12,10 +10,12 @@ import "envoy/api/v3alpha/core/grpc_service.proto"; import "validate/validate.proto"; +// [#protodoc-title: Metrics service] + // Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to // create Metrics Service. message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. - envoy.api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; + api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/metrics/v3alpha/stats.proto b/api/envoy/config/metrics/v3alpha/stats.proto index afa4468b3444..52bffa3caad5 100644 --- a/api/envoy/config/metrics/v3alpha/stats.proto +++ b/api/envoy/config/metrics/v3alpha/stats.proto @@ -1,6 +1,3 @@ -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - syntax = "proto3"; package envoy.config.metrics.v3alpha; @@ -18,6 +15,9 @@ import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + // Configuration for pluggable stats sinks. message StatsSink { // The name of the stats sink to instantiate. 
The name must match a supported @@ -149,12 +149,12 @@ message StatsMatcher { // Exclusive match. All stats are enabled except for those matching one of the supplied // StringMatcher protos. - envoy.type.matcher.ListStringMatcher exclusion_list = 2; + type.matcher.ListStringMatcher exclusion_list = 2; // Inclusive match. No stats are enabled except for those matching one of the supplied // StringMatcher protos. - envoy.type.matcher.ListStringMatcher inclusion_list = 3; - }; + type.matcher.ListStringMatcher inclusion_list = 3; + } } // Designates a tag name and value pair. The value may be either a fixed value @@ -232,7 +232,7 @@ message TagSpecifier { // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag // ``envoy.http_conn_manager_prefix`` will be added with the tag value // ``connection_manager_1``. - string regex = 2 [(validate.rules).string.max_bytes = 1024]; + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; // Specifies a fixed tag value for the ``tag_name``. string fixed_value = 3; @@ -248,13 +248,14 @@ message StatsdSink { // The UDP address of a running `statsd `_ // compliant listener. If specified, statistics will be flushed to this // address. - envoy.api.v3alpha.core.Address address = 1; + api.v3alpha.core.Address address = 1; // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. string tcp_cluster_name = 2; } + // Optional custom prefix for StatsdSink. If // specified, this will override the default prefix. // For example: @@ -290,16 +291,16 @@ message StatsdSink { // `. // [#comment:next free field: 3] message DogStatsdSink { + reserved 2; + oneof dog_statsd_specifier { option (validate.required) = true; // The UDP address of a running DogStatsD compliant listener. If specified, // statistics will be flushed to this address. 
- envoy.api.v3alpha.core.Address address = 1; + api.v3alpha.core.Address address = 1; } - reserved 2; - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto index e32764675cb5..dad27524bdb0 100644 --- a/api/envoy/config/overload/v2alpha/overload.proto +++ b/api/envoy/config/overload/v2alpha/overload.proto @@ -27,7 +27,7 @@ message ResourceMonitor { // ` // * :ref:`envoy.resource_monitors.injected_resource // ` - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Configuration for the resource monitor being instantiated. oneof config_type { @@ -40,15 +40,16 @@ message ResourceMonitor { message ThresholdTrigger { // If the resource pressure is greater than or equal to this value, the trigger // will fire. - double value = 1 [(validate.rules).double = {gte: 0, lte: 1}]; + double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; } message Trigger { // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof trigger_oneof { option (validate.required) = true; + ThresholdTrigger threshold = 2; } } @@ -57,12 +58,12 @@ message OverloadAction { // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A set of triggers for this action. If any of these triggers fire the overload action // is activated. Listeners are notified when the overload action transitions from // inactivated to activated, or vice versa. 
- repeated Trigger triggers = 2 [(validate.rules).repeated .min_items = 1]; + repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; } message OverloadManager { @@ -70,7 +71,7 @@ message OverloadManager { google.protobuf.Duration refresh_interval = 1; // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated .min_items = 1]; + repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; // The set of overload actions. repeated OverloadAction actions = 3; diff --git a/api/envoy/config/overload/v3alpha/overload.proto b/api/envoy/config/overload/v3alpha/overload.proto index 857b510e665a..72fa1d08be0f 100644 --- a/api/envoy/config/overload/v3alpha/overload.proto +++ b/api/envoy/config/overload/v3alpha/overload.proto @@ -27,7 +27,7 @@ message ResourceMonitor { // ` // * :ref:`envoy.resource_monitors.injected_resource // ` - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Configuration for the resource monitor being instantiated. oneof config_type { @@ -40,15 +40,16 @@ message ResourceMonitor { message ThresholdTrigger { // If the resource pressure is greater than or equal to this value, the trigger // will fire. - double value = 1 [(validate.rules).double = {gte: 0, lte: 1}]; + double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; } message Trigger { // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof trigger_oneof { option (validate.required) = true; + ThresholdTrigger threshold = 2; } } @@ -57,12 +58,12 @@ message OverloadAction { // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A set of triggers for this action. If any of these triggers fire the overload action // is activated. Listeners are notified when the overload action transitions from // inactivated to activated, or vice versa. - repeated Trigger triggers = 2 [(validate.rules).repeated .min_items = 1]; + repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; } message OverloadManager { @@ -70,7 +71,7 @@ message OverloadManager { google.protobuf.Duration refresh_interval = 1; // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated .min_items = 1]; + repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; // The set of overload actions. repeated OverloadAction actions = 3; diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto index 55577d4ab013..184b3ec081e1 100644 --- a/api/envoy/config/ratelimit/v2/rls.proto +++ b/api/envoy/config/ratelimit/v2/rls.proto @@ -14,12 +14,10 @@ import "validate/validate.proto"; // Rate limit :ref:`configuration overview `. message RateLimitServiceConfig { - reserved 1; + reserved 1, 3; // Specifies the gRPC service that hosts the rate limit service. The client // will connect to this cluster when it needs to make rate limit service // requests. 
- envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; - - reserved 3; + api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/ratelimit/v3alpha/rls.proto b/api/envoy/config/ratelimit/v3alpha/rls.proto index 16d5a4ad7712..be651d40e440 100644 --- a/api/envoy/config/ratelimit/v3alpha/rls.proto +++ b/api/envoy/config/ratelimit/v3alpha/rls.proto @@ -14,12 +14,10 @@ import "validate/validate.proto"; // Rate limit :ref:`configuration overview `. message RateLimitServiceConfig { - reserved 1; + reserved 1, 3; // Specifies the gRPC service that hosts the rate limit service. The client // will connect to this cluster when it needs to make rate limit service // requests. - envoy.api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; - - reserved 3; + api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/rbac/v2/rbac.proto b/api/envoy/config/rbac/v2/rbac.proto index 1d797d418681..e29e1a1b30d0 100644 --- a/api/envoy/config/rbac/v2/rbac.proto +++ b/api/envoy/config/rbac/v2/rbac.proto @@ -1,6 +1,11 @@ syntax = "proto3"; -import "validate/validate.proto"; +package envoy.config.rbac.v2; + +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.rbac.v2"; + import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/route/route.proto"; import "envoy/type/matcher/metadata.proto"; @@ -8,11 +13,7 @@ import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; -package envoy.config.rbac.v2; - -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.config.rbac.v2"; +import "validate/validate.proto"; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -84,12 +85,12 @@ message Policy { // Required. 
The set of permissions that define a role. Each permission is matched with OR // semantics. To match all actions for this policy, a single Permission with the `any` field set // to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated .min_items = 1]; + repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; // Required. The set of principals that are assigned/denied the role based on “action”. Each // principal is matched with OR semantics. To match all downstreams for this policy, a single // Principal with the `any` field set to true should be used. - repeated Principal principals = 2 [(validate.rules).repeated .min_items = 1]; + repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined @@ -99,11 +100,10 @@ message Policy { // Permission defines an action (or actions) that a principal can take. message Permission { - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. message Set { - repeated Permission rules = 1 [(validate.rules).repeated .min_items = 1]; + repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; } oneof rule { @@ -116,20 +116,20 @@ message Permission { Set or_rules = 2; // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool.const = true]; + bool any = 3 [(validate.rules).bool = {const: true}]; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - envoy.api.v2.route.HeaderMatcher header = 4; + api.v2.route.HeaderMatcher header = 4; // A CIDR block that describes the destination IP. - envoy.api.v2.core.CidrRange destination_ip = 5; + api.v2.core.CidrRange destination_ip = 5; // A port number that describes the destination port connecting to. 
- uint32 destination_port = 6 [(validate.rules).uint32.lte = 65535]; + uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - envoy.type.matcher.MetadataMatcher metadata = 7; + type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of `not_rule` would // match, this permission would not match. Conversely, if the value of `not_rule` would not @@ -155,28 +155,26 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - envoy.type.matcher.StringMatcher requested_server_name = 9; + type.matcher.StringMatcher requested_server_name = 9; } } // Principal defines an identity or a group of identities for a downstream subject. message Principal { - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. message Set { - repeated Principal ids = 1 [(validate.rules).repeated .min_items = 1]; + repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; } // Authentication attributes for a downstream. message Authenticated { reserved 1; - reserved "name"; // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the // certificate, otherwise the subject field is used. If unset, it applies to any user that is // authenticated. - envoy.type.matcher.StringMatcher principal_name = 2; + type.matcher.StringMatcher principal_name = 2; } oneof identifier { @@ -189,20 +187,20 @@ message Principal { Set or_ids = 2; // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool.const = true]; + bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. 
- envoy.api.v2.core.CidrRange source_ip = 5; + api.v2.core.CidrRange source_ip = 5; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - envoy.api.v2.route.HeaderMatcher header = 6; + api.v2.route.HeaderMatcher header = 6; // Metadata that describes additional information about the principal. - envoy.type.matcher.MetadataMatcher metadata = 7; + type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of `not_id` would match, // this principal would not match. Conversely, if the value of `not_id` would not match, this diff --git a/api/envoy/config/rbac/v3alpha/rbac.proto b/api/envoy/config/rbac/v3alpha/rbac.proto index 3fe9fe41c9d9..c354ba7e0f5b 100644 --- a/api/envoy/config/rbac/v3alpha/rbac.proto +++ b/api/envoy/config/rbac/v3alpha/rbac.proto @@ -1,6 +1,11 @@ syntax = "proto3"; -import "validate/validate.proto"; +package envoy.config.rbac.v3alpha; + +option java_outer_classname = "RbacProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha"; + import "envoy/api/v3alpha/core/address.proto"; import "envoy/api/v3alpha/route/route.proto"; import "envoy/type/matcher/metadata.proto"; @@ -8,11 +13,7 @@ import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; -package envoy.config.rbac.v3alpha; - -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.config.rbac.v3alpha"; +import "validate/validate.proto"; // [#protodoc-title: Role Based Access Control (RBAC)] @@ -84,25 +85,25 @@ message Policy { // Required. The set of permissions that define a role. Each permission is matched with OR // semantics. To match all actions for this policy, a single Permission with the `any` field set // to true should be used. 
- repeated Permission permissions = 1 [(validate.rules).repeated .min_items = 1]; + repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; // Required. The set of principals that are assigned/denied the role based on “action”. Each // principal is matched with OR semantics. To match all downstreams for this policy, a single // Principal with the `any` field set to true should be used. - repeated Principal principals = 2 [(validate.rules).repeated .min_items = 1]; + repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - // An optional symbolic expression specifying an access control condition. - // The condition is combined with AND semantics. + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. google.api.expr.v1alpha1.Expr condition = 3; } // Permission defines an action (or actions) that a principal can take. message Permission { - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. message Set { - repeated Permission rules = 1 [(validate.rules).repeated .min_items = 1]; + repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; } oneof rule { @@ -115,20 +116,20 @@ message Permission { Set or_rules = 2; // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool.const = true]; + bool any = 3 [(validate.rules).bool = {const: true}]; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - envoy.api.v3alpha.route.HeaderMatcher header = 4; + api.v3alpha.route.HeaderMatcher header = 4; // A CIDR block that describes the destination IP. 
- envoy.api.v3alpha.core.CidrRange destination_ip = 5; + api.v3alpha.core.CidrRange destination_ip = 5; // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32.lte = 65535]; + uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - envoy.type.matcher.MetadataMatcher metadata = 7; + type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of `not_rule` would // match, this permission would not match. Conversely, if the value of `not_rule` would not @@ -154,28 +155,26 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - envoy.type.matcher.StringMatcher requested_server_name = 9; + type.matcher.StringMatcher requested_server_name = 9; } } // Principal defines an identity or a group of identities for a downstream subject. message Principal { - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. message Set { - repeated Principal ids = 1 [(validate.rules).repeated .min_items = 1]; + repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; } // Authentication attributes for a downstream. message Authenticated { reserved 1; - reserved "name"; // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the // certificate, otherwise the subject field is used. If unset, it applies to any user that is // authenticated. - envoy.type.matcher.StringMatcher principal_name = 2; + type.matcher.StringMatcher principal_name = 2; } oneof identifier { @@ -188,20 +187,20 @@ message Principal { Set or_ids = 2; // When any is set, it matches any downstream. 
- bool any = 3 [(validate.rules).bool.const = true]; + bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. - envoy.api.v3alpha.core.CidrRange source_ip = 5; + api.v3alpha.core.CidrRange source_ip = 5; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - envoy.api.v3alpha.route.HeaderMatcher header = 6; + api.v3alpha.route.HeaderMatcher header = 6; // Metadata that describes additional information about the principal. - envoy.type.matcher.MetadataMatcher metadata = 7; + type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of `not_id` would match, // this principal would not match. Conversely, if the value of `not_id` would not match, this diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto index 110123e3c332..ad3d1ab1eada 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto @@ -14,5 +14,5 @@ import "validate/validate.proto"; // fraction of currently reserved heap memory divided by a statically configured maximum // specified in the FixedHeapConfig. 
message FixedHeapConfig { - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64.gt = 0]; + uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; } diff --git a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto index bc84ee992452..5f488d93bac2 100644 --- a/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto +++ b/api/envoy/config/resource_monitor/fixed_heap/v3alpha/fixed_heap.proto @@ -14,5 +14,5 @@ import "validate/validate.proto"; // fraction of currently reserved heap memory divided by a statically configured maximum // specified in the FixedHeapConfig. message FixedHeapConfig { - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64.gt = 0]; + uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; } diff --git a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto index 64c984fa0cb3..2a8a1a43150c 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto @@ -15,5 +15,5 @@ import "validate/validate.proto"; // the resource pressure and be updated atomically by a symbolic link swap. // This is intended primarily for integration tests to force Envoy into an overloaded state. 
message InjectedResourceConfig { - string filename = 1 [(validate.rules).string.min_bytes = 1]; + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto b/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto index 555e15323f46..2499c46df957 100644 --- a/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto +++ b/api/envoy/config/resource_monitor/injected_resource/v3alpha/injected_resource.proto @@ -15,5 +15,5 @@ import "validate/validate.proto"; // the resource pressure and be updated atomically by a symbolic link swap. // This is intended primarily for integration tests to force Envoy into an overloaded state. message InjectedResourceConfig { - string filename = 1 [(validate.rules).string.min_bytes = 1]; + string filename = 1 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto index 43f5013b27f1..f651dc3d3ed9 100644 --- a/api/envoy/config/trace/v2/trace.proto +++ b/api/envoy/config/trace/v2/trace.proto @@ -1,6 +1,3 @@ -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - syntax = "proto3"; package envoy.config.trace.v2; @@ -10,15 +7,18 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.trace.v2"; import "envoy/api/v2/core/grpc_service.proto"; -import "opencensus/proto/trace/v1/trace_config.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; - import "google/protobuf/wrappers.proto"; +import "opencensus/proto/trace/v1/trace_config.proto"; + import "validate/validate.proto"; +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + // The tracing configuration specifies global // settings for the HTTP tracer used by Envoy. 
The configuration is defined by // the :ref:`Bootstrap ` :ref:`tracing @@ -34,7 +34,7 @@ message Tracing { // - *envoy.dynamic.ot* // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: @@ -50,6 +50,7 @@ message Tracing { google.protobuf.Any typed_config = 3; } } + // Provides configuration for the HTTP tracer. Http http = 1; } @@ -57,33 +58,15 @@ message Tracing { // Configuration for the LightStep tracer. message LightstepConfig { // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. - string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; + string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; } // Configuration for the Zipkin tracer. message ZipkinConfig { - // The cluster manager cluster that hosts the Zipkin collectors. Note that the - // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster - // resources `. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string.min_bytes = 1]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. 
- // The default value is true. - google.protobuf.BoolValue shared_span_context = 4; - // Available Zipkin collector endpoint versions. enum CollectorEndpointVersion { // Zipkin API v1, JSON over HTTP. @@ -105,6 +88,24 @@ message ZipkinConfig { GRPC = 3; } + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. + string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + bool trace_id_128bit = 3; + + // Determines whether client and server spans will share the same span context. + // The default value is true. + google.protobuf.BoolValue shared_span_context = 4; + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be // used. CollectorEndpointVersion collector_endpoint_version = 5; @@ -116,7 +117,7 @@ message ZipkinConfig { message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. - string library = 1 [(validate.rules).string.min_bytes = 1]; + string library = 1 [(validate.rules).string = {min_bytes: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. @@ -126,14 +127,34 @@ message DynamicOtConfig { // Configuration for the Datadog tracer. message DatadogConfig { // The cluster to use for submitting traces to the Datadog agent. 
- string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string.min_bytes = 1]; + string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; } // Configuration for the OpenCensus tracer. // [#proto-status: experimental] message OpenCensusConfig { + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + // Configures tracing, e.g. the sampler, max number of annotations, etc. opencensus.proto.trace.v1.TraceConfig trace_config = 1; @@ -169,25 +190,6 @@ message OpenCensusConfig { // format: https://github.com/grpc/grpc/blob/master/doc/naming.md string ocagent_address = 12; - reserved 7; // Formerly zipkin_service_name. - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - // List of incoming trace context headers we will accept. First one found // wins. repeated TraceContext incoming_trace_context = 8; @@ -199,5 +201,5 @@ message OpenCensusConfig { // Configuration structure. message TraceServiceConfig { // The upstream gRPC cluster that hosts the metrics service. 
- envoy.api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/trace/v3alpha/trace.proto b/api/envoy/config/trace/v3alpha/trace.proto index f98f1f708962..ec17869dd60d 100644 --- a/api/envoy/config/trace/v3alpha/trace.proto +++ b/api/envoy/config/trace/v3alpha/trace.proto @@ -1,6 +1,3 @@ -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - syntax = "proto3"; package envoy.config.trace.v3alpha; @@ -10,15 +7,18 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.trace.v3alpha"; import "envoy/api/v3alpha/core/grpc_service.proto"; -import "opencensus/proto/trace/v1/trace_config.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; - import "google/protobuf/wrappers.proto"; +import "opencensus/proto/trace/v1/trace_config.proto"; + import "validate/validate.proto"; +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + // The tracing configuration specifies global // settings for the HTTP tracer used by Envoy. The configuration is defined by // the :ref:`Bootstrap ` :ref:`tracing @@ -34,7 +34,7 @@ message Tracing { // - *envoy.dynamic.ot* // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: @@ -50,6 +50,7 @@ message Tracing { google.protobuf.Any typed_config = 3; } } + // Provides configuration for the HTTP tracer. Http http = 1; } @@ -57,33 +58,15 @@ message Tracing { // Configuration for the LightStep tracer. message LightstepConfig { // The cluster manager cluster that hosts the LightStep collectors. 
- string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. - string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; + string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; } // Configuration for the Zipkin tracer. message ZipkinConfig { - // The cluster manager cluster that hosts the Zipkin collectors. Note that the - // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster - // resources `. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string.min_bytes = 1]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. - google.protobuf.BoolValue shared_span_context = 4; - // Available Zipkin collector endpoint versions. enum CollectorEndpointVersion { // Zipkin API v1, JSON over HTTP. @@ -105,6 +88,24 @@ message ZipkinConfig { GRPC = 3; } + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. 
+ string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + bool trace_id_128bit = 3; + + // Determines whether client and server spans will share the same span context. + // The default value is true. + google.protobuf.BoolValue shared_span_context = 4; + // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be // used. CollectorEndpointVersion collector_endpoint_version = 5; @@ -116,7 +117,7 @@ message ZipkinConfig { message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. - string library = 1 [(validate.rules).string.min_bytes = 1]; + string library = 1 [(validate.rules).string = {min_bytes: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. @@ -126,14 +127,34 @@ message DynamicOtConfig { // Configuration for the Datadog tracer. message DatadogConfig { // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string.min_bytes = 1]; + string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; } // Configuration for the OpenCensus tracer. // [#proto-status: experimental] message OpenCensusConfig { + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + // Configures tracing, e.g. 
the sampler, max number of annotations, etc. opencensus.proto.trace.v1.TraceConfig trace_config = 1; @@ -169,25 +190,6 @@ message OpenCensusConfig { // format: https://github.com/grpc/grpc/blob/master/doc/naming.md string ocagent_address = 12; - reserved 7; // Formerly zipkin_service_name. - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - // List of incoming trace context headers we will accept. First one found // wins. repeated TraceContext incoming_trace_context = 8; @@ -199,5 +201,5 @@ message OpenCensusConfig { // Configuration structure. message TraceServiceConfig { // The upstream gRPC cluster that hosts the metrics service. - envoy.api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; + api.v3alpha.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto index e68b40dae530..ffb121fcb061 100644 --- a/api/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v2alpha/tap.proto @@ -6,20 +6,20 @@ option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; -// [#protodoc-title: Tap] - -import "envoy/config/common/tap/v2alpha/common.proto"; import "envoy/api/v2/core/base.proto"; +import "envoy/config/common/tap/v2alpha/common.proto"; import "validate/validate.proto"; +// [#protodoc-title: Tap] + // Configuration for tap transport socket. 
This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. message Tap { // Common configuration for the tap transport socket. common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; // The underlying transport socket being wrapped. - api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message.required = true]; + api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto index 21625e17ef9c..dc5c303d0214 100644 --- a/api/envoy/config/transport_socket/tap/v3alpha/tap.proto +++ b/api/envoy/config/transport_socket/tap/v3alpha/tap.proto @@ -6,20 +6,21 @@ option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v3alpha"; -// [#protodoc-title: Tap] - -import "envoy/config/common/tap/v3alpha/common.proto"; import "envoy/api/v3alpha/core/base.proto"; +import "envoy/config/common/tap/v3alpha/common.proto"; import "validate/validate.proto"; +// [#protodoc-title: Tap] + // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. message Tap { // Common configuration for the tap transport socket. common.tap.v3alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message.required = true]; + [(validate.rules).message = {required: true}]; // The underlying transport socket being wrapped. 
- api.v3alpha.core.TransportSocket transport_socket = 2 [(validate.rules).message.required = true]; + api.v3alpha.core.TransportSocket transport_socket = 2 + [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/data/accesslog/v2/accesslog.proto b/api/envoy/data/accesslog/v2/accesslog.proto index bc6ff86bbd85..1cb7d13112e5 100644 --- a/api/envoy/data/accesslog/v2/accesslog.proto +++ b/api/envoy/data/accesslog/v2/accesslog.proto @@ -12,6 +12,7 @@ import "envoy/api/v2/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: gRPC access logs] @@ -34,9 +35,6 @@ message TCPAccessLogEntry { } message HTTPAccessLogEntry { - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - // HTTP version enum HTTPVersion { PROTOCOL_UNSPECIFIED = 0; @@ -44,6 +42,10 @@ message HTTPAccessLogEntry { HTTP11 = 2; HTTP2 = 3; } + + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; + HTTPVersion protocol_version = 2; // Description of the incoming HTTP request. @@ -67,15 +69,15 @@ message AccessLogCommon { // [#not-implemented-hide:] // This field indicates the rate at which this log entry was sampled. // Valid range is (0.0, 1.0]. - double sample_rate = 1 [(validate.rules).double.gt = 0.0, (validate.rules).double.lte = 1.0]; + double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; // This field is the remote/origin address on which the request from the user was received. // Note: This may not be the physical peer. E.g, if the remote address is inferred from for // example the x-forwarder-for header, proxy protocol, etc. - envoy.api.v2.core.Address downstream_remote_address = 2; + api.v2.core.Address downstream_remote_address = 2; // This field is the local/destination address on which the request from the user was received. 
- envoy.api.v2.core.Address downstream_local_address = 3; + api.v2.core.Address downstream_local_address = 3; // If the connection is secure,S this field will contain TLS properties. TLSProperties tls_properties = 4; @@ -124,10 +126,10 @@ message AccessLogCommon { // The upstream remote/destination address that handles this exchange. This does not include // retries. - envoy.api.v2.core.Address upstream_remote_address = 13; + api.v2.core.Address upstream_remote_address = 13; // The upstream local/origin address that handles this exchange. This does not include retries. - envoy.api.v2.core.Address upstream_local_address = 14; + api.v2.core.Address upstream_local_address = 14; // The upstream cluster that *upstream_remote_address* belongs to. string upstream_cluster = 15; @@ -143,7 +145,7 @@ message AccessLogCommon { // route created from a higher level forwarding rule with some ID can place // that ID in this field and cross reference later. It can also be used to // determine if a canary endpoint was used or not. - envoy.api.v2.core.Metadata metadata = 17; + api.v2.core.Metadata metadata = 17; // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. The format of this field depends on the configured @@ -153,10 +155,27 @@ message AccessLogCommon { // The name of the route string route_name = 19; + + // This field is the downstream direct remote address on which the request from the user was + // received. Note: This is always the physical peer, even if the remote address is inferred from + // for example the x-forwarder-for header, proxy protocol, etc. + api.v2.core.Address downstream_direct_remote_address = 20; } // Flags indicating occurrences during request/response processing. message ResponseFlags { + message Unauthorized { + // Reasons why the request was unauthorized + enum Reason { + REASON_UNSPECIFIED = 0; + + // The request was denied by the external authorization service. 
+ EXTERNAL_SERVICE = 1; + } + + Reason reason = 1; + } + // Indicates local server healthcheck failed. bool failed_local_healthcheck = 1; @@ -193,17 +212,6 @@ message ResponseFlags { // Indicates that the request was rate-limited locally. bool rate_limited = 12; - message Unauthorized { - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - // The request was denied by the external authorization service. - EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - // Indicates if the request was deemed unauthorized and the reason for it. Unauthorized unauthorized_details = 13; @@ -233,23 +241,12 @@ message TLSProperties { TLSv1_2 = 3; TLSv1_3 = 4; } - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; message CertificateProperties { message SubjectAltName { oneof san { string uri = 1; + // [#not-implemented-hide:] string dns = 2; } @@ -262,6 +259,19 @@ message TLSProperties { string subject = 2; } + // Version of TLS that was negotiated. + TLSVersion tls_version = 1; + + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + // + // Here it is expressed as an integer. + google.protobuf.UInt32Value tls_cipher_suite = 2; + + // SNI hostname from handshake. + string tls_sni_hostname = 3; + // Properties of the local certificate used to negotiate TLS. CertificateProperties local_certificate_properties = 4; @@ -276,7 +286,7 @@ message HTTPRequestProperties { // The request method (RFC 7231/2616). 
// [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] - envoy.api.v2.core.RequestMethod request_method = 1; + api.v2.core.RequestMethod request_method = 1; // The scheme portion of the incoming request URI. string scheme = 2; diff --git a/api/envoy/data/accesslog/v3alpha/accesslog.proto b/api/envoy/data/accesslog/v3alpha/accesslog.proto index 2cdd44bbd10f..5a877cf75a21 100644 --- a/api/envoy/data/accesslog/v3alpha/accesslog.proto +++ b/api/envoy/data/accesslog/v3alpha/accesslog.proto @@ -12,6 +12,7 @@ import "envoy/api/v3alpha/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: gRPC access logs] @@ -34,9 +35,6 @@ message TCPAccessLogEntry { } message HTTPAccessLogEntry { - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - // HTTP version enum HTTPVersion { PROTOCOL_UNSPECIFIED = 0; @@ -44,6 +42,10 @@ message HTTPAccessLogEntry { HTTP11 = 2; HTTP2 = 3; } + + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; + HTTPVersion protocol_version = 2; // Description of the incoming HTTP request. @@ -67,15 +69,15 @@ message AccessLogCommon { // [#not-implemented-hide:] // This field indicates the rate at which this log entry was sampled. // Valid range is (0.0, 1.0]. - double sample_rate = 1 [(validate.rules).double.gt = 0.0, (validate.rules).double.lte = 1.0]; + double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; // This field is the remote/origin address on which the request from the user was received. // Note: This may not be the physical peer. E.g, if the remote address is inferred from for // example the x-forwarder-for header, proxy protocol, etc. 
- envoy.api.v3alpha.core.Address downstream_remote_address = 2; + api.v3alpha.core.Address downstream_remote_address = 2; // This field is the local/destination address on which the request from the user was received. - envoy.api.v3alpha.core.Address downstream_local_address = 3; + api.v3alpha.core.Address downstream_local_address = 3; // If the connection is secure,S this field will contain TLS properties. TLSProperties tls_properties = 4; @@ -124,10 +126,10 @@ message AccessLogCommon { // The upstream remote/destination address that handles this exchange. This does not include // retries. - envoy.api.v3alpha.core.Address upstream_remote_address = 13; + api.v3alpha.core.Address upstream_remote_address = 13; // The upstream local/origin address that handles this exchange. This does not include retries. - envoy.api.v3alpha.core.Address upstream_local_address = 14; + api.v3alpha.core.Address upstream_local_address = 14; // The upstream cluster that *upstream_remote_address* belongs to. string upstream_cluster = 15; @@ -143,7 +145,7 @@ message AccessLogCommon { // route created from a higher level forwarding rule with some ID can place // that ID in this field and cross reference later. It can also be used to // determine if a canary endpoint was used or not. - envoy.api.v3alpha.core.Metadata metadata = 17; + api.v3alpha.core.Metadata metadata = 17; // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. The format of this field depends on the configured @@ -153,10 +155,27 @@ message AccessLogCommon { // The name of the route string route_name = 19; + + // This field is the downstream direct remote address on which the request from the user was + // received. Note: This is always the physical peer, even if the remote address is inferred from + // for example the x-forwarder-for header, proxy protocol, etc. 
+ api.v3alpha.core.Address downstream_direct_remote_address = 20; } // Flags indicating occurrences during request/response processing. message ResponseFlags { + message Unauthorized { + // Reasons why the request was unauthorized + enum Reason { + REASON_UNSPECIFIED = 0; + + // The request was denied by the external authorization service. + EXTERNAL_SERVICE = 1; + } + + Reason reason = 1; + } + // Indicates local server healthcheck failed. bool failed_local_healthcheck = 1; @@ -193,17 +212,6 @@ message ResponseFlags { // Indicates that the request was rate-limited locally. bool rate_limited = 12; - message Unauthorized { - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - // The request was denied by the external authorization service. - EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - // Indicates if the request was deemed unauthorized and the reason for it. Unauthorized unauthorized_details = 13; @@ -233,23 +241,12 @@ message TLSProperties { TLSv1_2 = 3; TLSv1_3 = 4; } - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; message CertificateProperties { message SubjectAltName { oneof san { string uri = 1; + // [#not-implemented-hide:] string dns = 2; } @@ -262,6 +259,19 @@ message TLSProperties { string subject = 2; } + // Version of TLS that was negotiated. + TLSVersion tls_version = 1; + + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + // + // Here it is expressed as an integer. 
+ google.protobuf.UInt32Value tls_cipher_suite = 2; + + // SNI hostname from handshake. + string tls_sni_hostname = 3; + // Properties of the local certificate used to negotiate TLS. CertificateProperties local_certificate_properties = 4; @@ -276,7 +286,7 @@ message HTTPRequestProperties { // The request method (RFC 7231/2616). // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] - envoy.api.v3alpha.core.RequestMethod request_method = 1; + api.v3alpha.core.RequestMethod request_method = 1; // The scheme portion of the incoming request URI. string scheme = 2; diff --git a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto index 1273f84d6df2..9ce85ce33354 100644 --- a/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto +++ b/api/envoy/data/cluster/v2alpha/outlier_detection_event.proto @@ -14,35 +14,6 @@ import "validate/validate.proto"; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. -message OutlierDetectionEvent { - // In case of eject represents type of ejection that took place. - OutlierEjectionType type = 1 [(validate.rules).enum.defined_only = true]; - // Timestamp for event. - google.protobuf.Timestamp timestamp = 2; - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string.min_bytes = 1]; - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string.min_bytes = 1]; - // The action that took place. 
- Action action = 6 [(validate.rules).enum.defined_only = true]; - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. - bool enforced = 8; - - oneof event { - option (validate.required) = true; - OutlierEjectSuccessRate eject_success_rate_event = 9; - OutlierEjectConsecutive eject_consecutive_event = 10; - OutlierEjectFailurePercentage eject_failure_percentage_event = 11; - } -} - // Type of ejection that took place enum OutlierEjectionType { // In case upstream host returns certain number of consecutive 5xx. @@ -52,8 +23,10 @@ enum OutlierEjectionType { // See :ref:`Cluster outlier detection ` documentation for // details. CONSECUTIVE_5XX = 0; + // In case upstream host returns certain number of consecutive gateway errors CONSECUTIVE_GATEWAY_FAILURE = 1; + // Runs over aggregated success rate statistics from every host in cluster // and selects hosts for which ratio of successful replies deviates from other hosts // in the cluster. @@ -63,12 +36,14 @@ enum OutlierEjectionType { // statistics. See :ref:`Cluster outlier detection ` // documentation for details. SUCCESS_RATE = 2; + // Consecutive local origin failures: Connection failures, resets, timeouts, etc // This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; + // Runs over aggregated success rate statistics for local origin failures // for all hosts in the cluster and selects hosts for which success rate deviates from other // hosts in the cluster. 
This type of ejection happens only when @@ -76,9 +51,11 @@ enum OutlierEjectionType { // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for SUCCESS_RATE_LOCAL_ORIGIN = 4; + // Runs over aggregated success rate statistics from every host in cluster and selects hosts for // which ratio of failed replies is above configured value. FAILURE_PERCENTAGE = 5; + // Runs over aggregated success rate statistics for local origin failures from every host in // cluster and selects hosts for which ratio of failed replies is above configured value. FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; @@ -88,18 +65,60 @@ enum OutlierEjectionType { enum Action { // In case host was excluded from service EJECT = 0; + // In case host was brought back into service UNEJECT = 1; } +message OutlierDetectionEvent { + // In case of eject represents type of ejection that took place. + OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; + + // Timestamp for event. + google.protobuf.Timestamp timestamp = 2; + + // The time in seconds since the last action (either an ejection or unejection) took place. + google.protobuf.UInt64Value secs_since_last_action = 3; + + // The :ref:`cluster ` that owns the ejected host. + string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; + + // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. + string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; + + // The action that took place. + Action action = 6 [(validate.rules).enum = {defined_only: true}]; + + // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to + // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and + // then re-added). + uint32 num_ejections = 7; + + // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was + // ejected. ``false`` means the event was logged but the host was not actually ejected. 
+ bool enforced = 8; + + oneof event { + option (validate.required) = true; + + OutlierEjectSuccessRate eject_success_rate_event = 9; + + OutlierEjectConsecutive eject_consecutive_event = 10; + + OutlierEjectFailurePercentage eject_failure_percentage_event = 11; + } +} + message OutlierEjectSuccessRate { // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32.lte = 100]; + uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; + // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32.lte = 100]; + uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; + // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32.lte = 100]; + uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; } message OutlierEjectConsecutive { @@ -107,5 +126,5 @@ message OutlierEjectConsecutive { message OutlierEjectFailurePercentage { // Host's success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32.lte = 100]; + uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; } diff --git a/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto b/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto index 305163659728..943823d39cf7 100644 --- a/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto +++ b/api/envoy/data/cluster/v3alpha/outlier_detection_event.proto @@ -14,34 +14,6 @@ import "validate/validate.proto"; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. -message OutlierDetectionEvent { - // In case of eject represents type of ejection that took place. 
- OutlierEjectionType type = 1 [(validate.rules).enum.defined_only = true]; - // Timestamp for event. - google.protobuf.Timestamp timestamp = 2; - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string.min_bytes = 1]; - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string.min_bytes = 1]; - // The action that took place. - Action action = 6 [(validate.rules).enum.defined_only = true]; - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. - bool enforced = 8; - - oneof event { - option (validate.required) = true; - OutlierEjectSuccessRate eject_success_rate_event = 9; - OutlierEjectConsecutive eject_consecutive_event = 10; - } -} - // Type of ejection that took place enum OutlierEjectionType { // In case upstream host returns certain number of consecutive 5xx. @@ -51,8 +23,10 @@ enum OutlierEjectionType { // See :ref:`Cluster outlier detection ` documentation for // details. CONSECUTIVE_5XX = 0; + // In case upstream host returns certain number of consecutive gateway errors CONSECUTIVE_GATEWAY_FAILURE = 1; + // Runs over aggregated success rate statistics from every host in cluster // and selects hosts for which ratio of successful replies deviates from other hosts // in the cluster. @@ -62,12 +36,14 @@ enum OutlierEjectionType { // statistics. See :ref:`Cluster outlier detection ` // documentation for details. 
SUCCESS_RATE = 2; + // Consecutive local origin failures: Connection failures, resets, timeouts, etc // This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; + // Runs over aggregated success rate statistics for local origin failures // for all hosts in the cluster and selects hosts for which success rate deviates from other // hosts in the cluster. This type of ejection happens only when @@ -75,25 +51,80 @@ enum OutlierEjectionType { // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for SUCCESS_RATE_LOCAL_ORIGIN = 4; + + // Runs over aggregated success rate statistics from every host in cluster and selects hosts for + // which ratio of failed replies is above configured value. + FAILURE_PERCENTAGE = 5; + + // Runs over aggregated success rate statistics for local origin failures from every host in + // cluster and selects hosts for which ratio of failed replies is above configured value. + FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; } // Represents possible action applied to upstream host enum Action { // In case host was excluded from service EJECT = 0; + // In case host was brought back into service UNEJECT = 1; } +message OutlierDetectionEvent { + // In case of eject represents type of ejection that took place. + OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; + + // Timestamp for event. + google.protobuf.Timestamp timestamp = 2; + + // The time in seconds since the last action (either an ejection or unejection) took place. + google.protobuf.UInt64Value secs_since_last_action = 3; + + // The :ref:`cluster ` that owns the ejected host. + string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; + + // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. 
+ string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; + + // The action that took place. + Action action = 6 [(validate.rules).enum = {defined_only: true}]; + + // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to + // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and + // then re-added). + uint32 num_ejections = 7; + + // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was + // ejected. ``false`` means the event was logged but the host was not actually ejected. + bool enforced = 8; + + oneof event { + option (validate.required) = true; + + OutlierEjectSuccessRate eject_success_rate_event = 9; + + OutlierEjectConsecutive eject_consecutive_event = 10; + + OutlierEjectFailurePercentage eject_failure_percentage_event = 11; + } +} + message OutlierEjectSuccessRate { // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32.lte = 100]; + uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; + // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32.lte = 100]; + uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; + // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32.lte = 100]; + uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; } message OutlierEjectConsecutive { } + +message OutlierEjectFailurePercentage { + // Host's success rate at the time of the ejection event on a 0-100 range. 
+ uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; +} diff --git a/api/envoy/data/core/v2alpha/health_check_event.proto b/api/envoy/data/core/v2alpha/health_check_event.proto index c5b2f70a5e24..29c66421e112 100644 --- a/api/envoy/data/core/v2alpha/health_check_event.proto +++ b/api/envoy/data/core/v2alpha/health_check_event.proto @@ -15,10 +15,25 @@ import "validate/validate.proto"; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. +enum HealthCheckFailureType { + ACTIVE = 0; + PASSIVE = 1; + NETWORK = 2; +} + +enum HealthCheckerType { + HTTP = 0; + TCP = 1; + GRPC = 2; + REDIS = 3; +} + message HealthCheckEvent { - HealthCheckerType health_checker_type = 1 [(validate.rules).enum.defined_only = true]; - envoy.api.v2.core.Address host = 2; - string cluster_name = 3 [(validate.rules).string.min_bytes = 1]; + HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; + + api.v2.core.Address host = 2; + + string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; oneof event { option (validate.required) = true; @@ -43,22 +58,9 @@ message HealthCheckEvent { google.protobuf.Timestamp timestamp = 6; } -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - message HealthCheckEjectUnhealthy { // The type of failure that caused this ejection. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; } message HealthCheckAddHealthy { @@ -70,7 +72,8 @@ message HealthCheckAddHealthy { message HealthCheckFailure { // The type of failure that caused this event. 
- HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; + // Whether this event is the result of the first ever health check on a host. bool first_check = 2; } diff --git a/api/envoy/data/core/v3alpha/health_check_event.proto b/api/envoy/data/core/v3alpha/health_check_event.proto index c714743f7070..bdb2cfbdbc67 100644 --- a/api/envoy/data/core/v3alpha/health_check_event.proto +++ b/api/envoy/data/core/v3alpha/health_check_event.proto @@ -15,10 +15,25 @@ import "validate/validate.proto"; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. +enum HealthCheckFailureType { + ACTIVE = 0; + PASSIVE = 1; + NETWORK = 2; +} + +enum HealthCheckerType { + HTTP = 0; + TCP = 1; + GRPC = 2; + REDIS = 3; +} + message HealthCheckEvent { - HealthCheckerType health_checker_type = 1 [(validate.rules).enum.defined_only = true]; - envoy.api.v3alpha.core.Address host = 2; - string cluster_name = 3 [(validate.rules).string.min_bytes = 1]; + HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; + + api.v3alpha.core.Address host = 2; + + string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; oneof event { option (validate.required) = true; @@ -43,22 +58,9 @@ message HealthCheckEvent { google.protobuf.Timestamp timestamp = 6; } -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - message HealthCheckEjectUnhealthy { // The type of failure that caused this ejection. 
- HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; } message HealthCheckAddHealthy { @@ -70,7 +72,8 @@ message HealthCheckAddHealthy { message HealthCheckFailure { // The type of failure that caused this event. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true]; + HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; + // Whether this event is the result of the first ever health check on a host. bool first_check = 2; } diff --git a/api/envoy/data/tap/v2alpha/transport.proto b/api/envoy/data/tap/v2alpha/transport.proto index c3a3d8b8eb86..a304ac3bf7a9 100644 --- a/api/envoy/data/tap/v2alpha/transport.proto +++ b/api/envoy/data/tap/v2alpha/transport.proto @@ -1,9 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - package envoy.data.tap.v2alpha; option java_outer_classname = "TransportProto"; @@ -15,26 +11,27 @@ import "envoy/data/tap/v2alpha/common.proto"; import "google/protobuf/timestamp.proto"; +// [#protodoc-title: Transport tap data] +// Trace format for the tap transport socket extension. This dumps plain text read/write +// sequences on a socket. + // Connection properties. message Connection { // Local address. - envoy.api.v2.core.Address local_address = 2; + api.v2.core.Address local_address = 2; // Remote address. - envoy.api.v2.core.Address remote_address = 3; + api.v2.core.Address remote_address = 3; } // Event in a socket trace. message SocketEvent { - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - // Data read by Envoy from the transport socket. message Read { + // TODO(htuch): Half-close for reads. + // Binary data read. Body data = 1; - - // TODO(htuch): Half-close for reads. 
} // Data written by Envoy to the transport socket. @@ -51,10 +48,15 @@ message SocketEvent { // TODO(mattklein123): Close event type. } + // Timestamp for event. + google.protobuf.Timestamp timestamp = 1; + // Read or write with content as bytes string. oneof event_selector { Read read = 2; + Write write = 3; + Closed closed = 4; } } diff --git a/api/envoy/data/tap/v2alpha/wrapper.proto b/api/envoy/data/tap/v2alpha/wrapper.proto index a49cd3189b4d..597b22f014df 100644 --- a/api/envoy/data/tap/v2alpha/wrapper.proto +++ b/api/envoy/data/tap/v2alpha/wrapper.proto @@ -1,16 +1,16 @@ syntax = "proto3"; -import "envoy/data/tap/v2alpha/http.proto"; -import "envoy/data/tap/v2alpha/transport.proto"; - -import "validate/validate.proto"; - package envoy.data.tap.v2alpha; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; +import "envoy/data/tap/v2alpha/http.proto"; +import "envoy/data/tap/v2alpha/transport.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Tap data wrappers] // Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for diff --git a/api/envoy/data/tap/v3alpha/transport.proto b/api/envoy/data/tap/v3alpha/transport.proto index e35f036f5d82..337962c95f0a 100644 --- a/api/envoy/data/tap/v3alpha/transport.proto +++ b/api/envoy/data/tap/v3alpha/transport.proto @@ -1,9 +1,5 @@ syntax = "proto3"; -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - package envoy.data.tap.v3alpha; option java_outer_classname = "TransportProto"; @@ -15,26 +11,27 @@ import "envoy/data/tap/v3alpha/common.proto"; import "google/protobuf/timestamp.proto"; +// [#protodoc-title: Transport tap data] +// Trace format for the tap transport socket extension. This dumps plain text read/write +// sequences on a socket. + // Connection properties. 
message Connection { // Local address. - envoy.api.v3alpha.core.Address local_address = 2; + api.v3alpha.core.Address local_address = 2; // Remote address. - envoy.api.v3alpha.core.Address remote_address = 3; + api.v3alpha.core.Address remote_address = 3; } // Event in a socket trace. message SocketEvent { - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - // Data read by Envoy from the transport socket. message Read { + // TODO(htuch): Half-close for reads. + // Binary data read. Body data = 1; - - // TODO(htuch): Half-close for reads. } // Data written by Envoy to the transport socket. @@ -51,10 +48,15 @@ message SocketEvent { // TODO(mattklein123): Close event type. } + // Timestamp for event. + google.protobuf.Timestamp timestamp = 1; + // Read or write with content as bytes string. oneof event_selector { Read read = 2; + Write write = 3; + Closed closed = 4; } } diff --git a/api/envoy/data/tap/v3alpha/wrapper.proto b/api/envoy/data/tap/v3alpha/wrapper.proto index 1aff052e90d1..de5cdc0f15b5 100644 --- a/api/envoy/data/tap/v3alpha/wrapper.proto +++ b/api/envoy/data/tap/v3alpha/wrapper.proto @@ -1,16 +1,16 @@ syntax = "proto3"; -import "envoy/data/tap/v3alpha/http.proto"; -import "envoy/data/tap/v3alpha/transport.proto"; - -import "validate/validate.proto"; - package envoy.data.tap.v3alpha; option java_outer_classname = "WrapperProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.data.tap.v3alpha"; +import "envoy/data/tap/v3alpha/http.proto"; +import "envoy/data/tap/v3alpha/transport.proto"; + +import "validate/validate.proto"; + // [#protodoc-title: Tap data wrappers] // Wrapper for all fully buffered and streamed tap traces that Envoy emits. 
This is required for diff --git a/api/envoy/service/accesslog/v2/als.proto b/api/envoy/service/accesslog/v2/als.proto index c06199a2b208..e3022af83bc9 100644 --- a/api/envoy/service/accesslog/v2/als.proto +++ b/api/envoy/service/accesslog/v2/als.proto @@ -35,29 +35,29 @@ message StreamAccessLogsResponse { message StreamAccessLogsMessage { message Identifier { // The node sending the access log messages over the stream. - envoy.api.v2.core.Node node = 1 [(validate.rules).message.required = true]; + api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string.min_bytes = 1]; + string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; } - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - // Wrapper for batches of HTTP access log entries. message HTTPAccessLogEntries { - repeated envoy.data.accesslog.v2.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated .min_items = 1]; + repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1 + [(validate.rules).repeated = {min_items: 1}]; } // Wrapper for batches of TCP access log entries. message TCPAccessLogEntries { - repeated envoy.data.accesslog.v2.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated .min_items = 1]; + repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1 + [(validate.rules).repeated = {min_items: 1}]; } + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + // Batches of log entries of a single type. Generally speaking, a given stream should only // ever include one type of log entry. 
oneof log_entries { diff --git a/api/envoy/service/accesslog/v3alpha/als.proto b/api/envoy/service/accesslog/v3alpha/als.proto index ad05b823d1e5..aafabec3e17e 100644 --- a/api/envoy/service/accesslog/v3alpha/als.proto +++ b/api/envoy/service/accesslog/v3alpha/als.proto @@ -35,29 +35,29 @@ message StreamAccessLogsResponse { message StreamAccessLogsMessage { message Identifier { // The node sending the access log messages over the stream. - envoy.api.v3alpha.core.Node node = 1 [(validate.rules).message.required = true]; + api.v3alpha.core.Node node = 1 [(validate.rules).message = {required: true}]; // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. - string log_name = 2 [(validate.rules).string.min_bytes = 1]; + string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; } - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - // Wrapper for batches of HTTP access log entries. message HTTPAccessLogEntries { - repeated envoy.data.accesslog.v3alpha.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated .min_items = 1]; + repeated data.accesslog.v3alpha.HTTPAccessLogEntry log_entry = 1 + [(validate.rules).repeated = {min_items: 1}]; } // Wrapper for batches of TCP access log entries. message TCPAccessLogEntries { - repeated envoy.data.accesslog.v3alpha.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated .min_items = 1]; + repeated data.accesslog.v3alpha.TCPAccessLogEntry log_entry = 1 + [(validate.rules).repeated = {min_items: 1}]; } + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + // Batches of log entries of a single type. Generally speaking, a given stream should only // ever include one type of log entry. 
oneof log_entries { diff --git a/api/envoy/service/auth/v2/attribute_context.proto b/api/envoy/service/auth/v2/attribute_context.proto index cfb71ec0ce5f..6e6986756af4 100644 --- a/api/envoy/service/auth/v2/attribute_context.proto +++ b/api/envoy/service/auth/v2/attribute_context.proto @@ -22,6 +22,17 @@ import "google/protobuf/timestamp.proto"; // Each attribute has a type and a name, which is logically defined as a proto message field // of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes // supported by Envoy authorization system. +// [#comment: The following items are left out of this proto +// Request.Auth field for jwt tokens +// Request.Api for api management +// Origin peer that originated the request +// Caching Protocol +// request_context return values to inject back into the filter chain +// peer.claims -- from X.509 extensions +// Configuration +// - field mask to send +// - which return values from request_context are copied back +// - which return values are copied into request_headers] message AttributeContext { // This message defines attributes for a node that handles a network request. // The node can be either a service or an application that sends, forwards, @@ -30,7 +41,7 @@ message AttributeContext { message Peer { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. - envoy.api.v2.core.Address address = 1; + api.v2.core.Address address = 1; // The canonical service name of the peer. // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster @@ -63,8 +74,6 @@ message AttributeContext { // Represents an HTTP request or an HTTP-like request. HttpRequest http = 2; - - // More request types are added here as necessary. } // This message defines attributes for an HTTP request. @@ -135,17 +144,5 @@ message AttributeContext { map context_extensions = 10; // Dynamic metadata associated with the request. 
- envoy.api.v2.core.Metadata metadata_context = 11; + api.v2.core.Metadata metadata_context = 11; } - -// The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers diff --git a/api/envoy/service/auth/v2/external_auth.proto b/api/envoy/service/auth/v2/external_auth.proto index 8a3d4f1a629e..7b2144cf8b8c 100644 --- a/api/envoy/service/auth/v2/external_auth.proto +++ b/api/envoy/service/auth/v2/external_auth.proto @@ -8,10 +8,11 @@ option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_generic_services = true; import "envoy/api/v2/core/base.proto"; -import "envoy/type/http_status.proto"; import "envoy/service/auth/v2/attribute_context.proto"; +import "envoy/type/http_status.proto"; import "google/rpc/status.proto"; + import "validate/validate.proto"; // [#protodoc-title: Authorization Service ] @@ -24,7 +25,8 @@ import "validate/validate.proto"; service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse); + rpc Check(CheckRequest) returns (CheckResponse) { + } } message CheckRequest { @@ -36,11 +38,11 @@ message CheckRequest { message DeniedHttpResponse { // This field allows the authorization service to send a HTTP response status // code to the downstream client other than 403 (Forbidden). 
- envoy.type.HttpStatus status = 1 [(validate.rules).message.required = true]; + type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers // to the downstream client. - repeated envoy.api.v2.core.HeaderValueOption headers = 2; + repeated api.v2.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data // to the downstream client. @@ -55,7 +57,7 @@ message OkHttpResponse { // the filter will append the correspondent header value to the matched request header. Note that // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. - repeated envoy.api.v2.core.HeaderValueOption headers = 2; + repeated api.v2.core.HeaderValueOption headers = 2; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/api/envoy/service/auth/v3alpha/attribute_context.proto b/api/envoy/service/auth/v3alpha/attribute_context.proto index 95ac3428fc49..ee5ca3d27afd 100644 --- a/api/envoy/service/auth/v3alpha/attribute_context.proto +++ b/api/envoy/service/auth/v3alpha/attribute_context.proto @@ -22,6 +22,17 @@ import "google/protobuf/timestamp.proto"; // Each attribute has a type and a name, which is logically defined as a proto message field // of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes // supported by Envoy authorization system. 
+// [#comment: The following items are left out of this proto +// Request.Auth field for jwt tokens +// Request.Api for api management +// Origin peer that originated the request +// Caching Protocol +// request_context return values to inject back into the filter chain +// peer.claims -- from X.509 extensions +// Configuration +// - field mask to send +// - which return values from request_context are copied back +// - which return values are copied into request_headers] message AttributeContext { // This message defines attributes for a node that handles a network request. // The node can be either a service or an application that sends, forwards, @@ -30,7 +41,7 @@ message AttributeContext { message Peer { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. - envoy.api.v3alpha.core.Address address = 1; + api.v3alpha.core.Address address = 1; // The canonical service name of the peer. // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster @@ -63,8 +74,6 @@ message AttributeContext { // Represents an HTTP request or an HTTP-like request. HttpRequest http = 2; - - // More request types are added here as necessary. } // This message defines attributes for an HTTP request. @@ -135,17 +144,5 @@ message AttributeContext { map context_extensions = 10; // Dynamic metadata associated with the request. 
- envoy.api.v3alpha.core.Metadata metadata_context = 11; + api.v3alpha.core.Metadata metadata_context = 11; } - -// The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers diff --git a/api/envoy/service/auth/v3alpha/external_auth.proto b/api/envoy/service/auth/v3alpha/external_auth.proto index 0130040c1409..81cc46294d3b 100644 --- a/api/envoy/service/auth/v3alpha/external_auth.proto +++ b/api/envoy/service/auth/v3alpha/external_auth.proto @@ -8,10 +8,11 @@ option java_package = "io.envoyproxy.envoy.service.auth.v3alpha"; option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; -import "envoy/type/http_status.proto"; import "envoy/service/auth/v3alpha/attribute_context.proto"; +import "envoy/type/http_status.proto"; import "google/rpc/status.proto"; + import "validate/validate.proto"; // [#protodoc-title: Authorization Service ] @@ -24,7 +25,8 @@ import "validate/validate.proto"; service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse); + rpc Check(CheckRequest) returns (CheckResponse) { + } } message CheckRequest { @@ -36,11 +38,11 @@ message CheckRequest { message DeniedHttpResponse { // This field allows the authorization service to send a HTTP response status // code to the downstream client other than 403 (Forbidden). 
- envoy.type.HttpStatus status = 1 [(validate.rules).message.required = true]; + type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers // to the downstream client. - repeated envoy.api.v3alpha.core.HeaderValueOption headers = 2; + repeated api.v3alpha.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data // to the downstream client. @@ -55,7 +57,7 @@ message OkHttpResponse { // the filter will append the correspondent header value to the matched request header. Note that // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. - repeated envoy.api.v3alpha.core.HeaderValueOption headers = 2; + repeated api.v3alpha.core.HeaderValueOption headers = 2; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto index 45a7407f0c44..63b129069ede 100644 --- a/api/envoy/service/discovery/v2/ads.proto +++ b/api/envoy/service/discovery/v2/ads.proto @@ -9,11 +9,6 @@ option java_generic_services = true; import "envoy/api/v2/discovery.proto"; -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { -} - // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v2` for backwards // compatibility with existing management servers. New development in discovery @@ -27,11 +22,16 @@ message AdsDummy { // the multiplexed singleton APIs at the Envoy instance and management server. service AggregatedDiscoveryService { // This is a gRPC-only API. 
- rpc StreamAggregatedResources(stream envoy.api.v2.DiscoveryRequest) - returns (stream envoy.api.v2.DiscoveryResponse) { + rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest) + returns (stream api.v2.DiscoveryResponse) { } - rpc DeltaAggregatedResources(stream envoy.api.v2.DeltaDiscoveryRequest) - returns (stream envoy.api.v2.DeltaDiscoveryResponse) { + rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest) + returns (stream api.v2.DeltaDiscoveryResponse) { } } + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message AdsDummy { +} diff --git a/api/envoy/service/discovery/v2/rtds.proto b/api/envoy/service/discovery/v2/rtds.proto index c8b53d670fe1..4dfe6f2a6645 100644 --- a/api/envoy/service/discovery/v2/rtds.proto +++ b/api/envoy/service/discovery/v2/rtds.proto @@ -17,22 +17,16 @@ import "validate/validate.proto"; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { -} - // Discovery service for Runtime resources. 
service RuntimeDiscoveryService { - rpc StreamRuntime(stream envoy.api.v2.DiscoveryRequest) - returns (stream envoy.api.v2.DiscoveryResponse) { + rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { } - rpc DeltaRuntime(stream envoy.api.v2.DeltaDiscoveryRequest) - returns (stream envoy.api.v2.DeltaDiscoveryResponse) { + rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest) + returns (stream api.v2.DeltaDiscoveryResponse) { } - rpc FetchRuntime(envoy.api.v2.DiscoveryRequest) returns (envoy.api.v2.DiscoveryResponse) { + rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { option (google.api.http) = { post: "/v2/discovery:runtime" body: "*" @@ -40,10 +34,16 @@ service RuntimeDiscoveryService { } } +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message RtdsDummy { +} + // RTDS resource type. This describes a layer in the runtime virtual filesystem. message Runtime { // Runtime resource name. This makes the Runtime a self-describing xDS // resource. - string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + google.protobuf.Struct layer = 2; } diff --git a/api/envoy/service/discovery/v3alpha/ads.proto b/api/envoy/service/discovery/v3alpha/ads.proto index 251c51301a16..329df8b58511 100644 --- a/api/envoy/service/discovery/v3alpha/ads.proto +++ b/api/envoy/service/discovery/v3alpha/ads.proto @@ -9,11 +9,6 @@ option java_generic_services = true; import "envoy/api/v3alpha/discovery.proto"; -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { -} - // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v3alpha` for backwards // compatibility with existing management servers. New development in discovery @@ -27,11 +22,16 @@ message AdsDummy { // the multiplexed singleton APIs at the Envoy instance and management server. service AggregatedDiscoveryService { // This is a gRPC-only API. - rpc StreamAggregatedResources(stream envoy.api.v3alpha.DiscoveryRequest) - returns (stream envoy.api.v3alpha.DiscoveryResponse) { + rpc StreamAggregatedResources(stream api.v3alpha.DiscoveryRequest) + returns (stream api.v3alpha.DiscoveryResponse) { } - rpc DeltaAggregatedResources(stream envoy.api.v3alpha.DeltaDiscoveryRequest) - returns (stream envoy.api.v3alpha.DeltaDiscoveryResponse) { + rpc DeltaAggregatedResources(stream api.v3alpha.DeltaDiscoveryRequest) + returns (stream api.v3alpha.DeltaDiscoveryResponse) { } } + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message AdsDummy { +} diff --git a/api/envoy/service/discovery/v3alpha/rtds.proto b/api/envoy/service/discovery/v3alpha/rtds.proto index d4184ab6e197..c0ad2b9c41bc 100644 --- a/api/envoy/service/discovery/v3alpha/rtds.proto +++ b/api/envoy/service/discovery/v3alpha/rtds.proto @@ -17,23 +17,17 @@ import "validate/validate.proto"; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { -} - // Discovery service for Runtime resources. 
service RuntimeDiscoveryService { - rpc StreamRuntime(stream envoy.api.v3alpha.DiscoveryRequest) - returns (stream envoy.api.v3alpha.DiscoveryResponse) { + rpc StreamRuntime(stream api.v3alpha.DiscoveryRequest) + returns (stream api.v3alpha.DiscoveryResponse) { } - rpc DeltaRuntime(stream envoy.api.v3alpha.DeltaDiscoveryRequest) - returns (stream envoy.api.v3alpha.DeltaDiscoveryResponse) { + rpc DeltaRuntime(stream api.v3alpha.DeltaDiscoveryRequest) + returns (stream api.v3alpha.DeltaDiscoveryResponse) { } - rpc FetchRuntime(envoy.api.v3alpha.DiscoveryRequest) - returns (envoy.api.v3alpha.DiscoveryResponse) { + rpc FetchRuntime(api.v3alpha.DiscoveryRequest) returns (api.v3alpha.DiscoveryResponse) { option (google.api.http) = { post: "/v3alpha/discovery:runtime" body: "*" @@ -41,10 +35,16 @@ service RuntimeDiscoveryService { } } +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message RtdsDummy { +} + // RTDS resource type. This describes a layer in the runtime virtual filesystem. message Runtime { // Runtime resource name. This makes the Runtime a self-describing xDS // resource. 
- string name = 1 [(validate.rules).string.min_bytes = 1]; + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + google.protobuf.Struct layer = 2; } diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto index 328bb547d630..ce52826a80b6 100644 --- a/api/envoy/service/ratelimit/v2/rls.proto +++ b/api/envoy/service/ratelimit/v2/rls.proto @@ -5,6 +5,7 @@ package envoy.service.ratelimit.v2; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; +option java_generic_services = true; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/ratelimit/ratelimit.proto"; @@ -34,7 +35,7 @@ message RateLimitRequest { // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is // processed by the service (see below). If any of the descriptors are over limit, the entire // request is considered to be over limit. - repeated envoy.api.v2.ratelimit.RateLimitDescriptor descriptors = 2; + repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2; // Rate limit requests can optionally specify the number of hits a request adds to the matched // limit. If the value is not set in the message, a request increases the matched limit by 1. @@ -46,8 +47,10 @@ message RateLimitResponse { enum Code { // The response code is not known. UNKNOWN = 0; + // The response code to notify that the number of requests are under limit. OK = 1; + // The response code to notify that the number of requests are over limit. OVER_LIMIT = 2; } @@ -57,18 +60,23 @@ message RateLimitResponse { enum Unit { // The time unit is not known. UNKNOWN = 0; + // The time unit representing a second. SECOND = 1; + // The time unit representing a minute. MINUTE = 2; + // The time unit representing an hour. HOUR = 3; + // The time unit representing a day. DAY = 4; } // The number of requests per unit of time. 
uint32 requests_per_unit = 1; + // The unit of time. Unit unit = 2; } @@ -76,8 +84,10 @@ message RateLimitResponse { message DescriptorStatus { // The response code for an individual descriptor. Code code = 1; + // The current limit as configured by the server. Useful for debugging, etc. RateLimit current_limit = 2; + // The limit remaining in the current time unit. uint32 limit_remaining = 3; } @@ -85,10 +95,12 @@ message RateLimitResponse { // The overall response code which takes into account all of the descriptors that were passed // in the RateLimitRequest message. Code overall_code = 1; + // A list of DescriptorStatus messages which matches the length of the descriptor list passed // in the RateLimitRequest. This can be used by the caller to determine which individual // descriptors failed and/or what the currently configured limits are for all of them. repeated DescriptorStatus statuses = 2; + // A list of headers to add to the response - repeated envoy.api.v2.core.HeaderValue headers = 3; + repeated api.v2.core.HeaderValue headers = 3; } diff --git a/api/envoy/service/ratelimit/v3alpha/rls.proto b/api/envoy/service/ratelimit/v3alpha/rls.proto index 57a3ee98de94..910b24c39b68 100644 --- a/api/envoy/service/ratelimit/v3alpha/rls.proto +++ b/api/envoy/service/ratelimit/v3alpha/rls.proto @@ -5,6 +5,7 @@ package envoy.service.ratelimit.v3alpha; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.service.ratelimit.v3alpha"; +option java_generic_services = true; import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/ratelimit/ratelimit.proto"; @@ -34,7 +35,7 @@ message RateLimitRequest { // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is // processed by the service (see below). If any of the descriptors are over limit, the entire // request is considered to be over limit. 
- repeated envoy.api.v3alpha.ratelimit.RateLimitDescriptor descriptors = 2; + repeated api.v3alpha.ratelimit.RateLimitDescriptor descriptors = 2; // Rate limit requests can optionally specify the number of hits a request adds to the matched // limit. If the value is not set in the message, a request increases the matched limit by 1. @@ -46,8 +47,10 @@ message RateLimitResponse { enum Code { // The response code is not known. UNKNOWN = 0; + // The response code to notify that the number of requests are under limit. OK = 1; + // The response code to notify that the number of requests are over limit. OVER_LIMIT = 2; } @@ -57,18 +60,23 @@ message RateLimitResponse { enum Unit { // The time unit is not known. UNKNOWN = 0; + // The time unit representing a second. SECOND = 1; + // The time unit representing a minute. MINUTE = 2; + // The time unit representing an hour. HOUR = 3; + // The time unit representing a day. DAY = 4; } // The number of requests per unit of time. uint32 requests_per_unit = 1; + // The unit of time. Unit unit = 2; } @@ -76,8 +84,10 @@ message RateLimitResponse { message DescriptorStatus { // The response code for an individual descriptor. Code code = 1; + // The current limit as configured by the server. Useful for debugging, etc. RateLimit current_limit = 2; + // The limit remaining in the current time unit. uint32 limit_remaining = 3; } @@ -85,10 +95,12 @@ message RateLimitResponse { // The overall response code which takes into account all of the descriptors that were passed // in the RateLimitRequest message. Code overall_code = 1; + // A list of DescriptorStatus messages which matches the length of the descriptor list passed // in the RateLimitRequest. This can be used by the caller to determine which individual // descriptors failed and/or what the currently configured limits are for all of them. 
repeated DescriptorStatus statuses = 2; + // A list of headers to add to the response - repeated envoy.api.v3alpha.core.HeaderValue headers = 3; + repeated api.v3alpha.core.HeaderValue headers = 3; } diff --git a/api/envoy/service/tap/v2alpha/common.proto b/api/envoy/service/tap/v2alpha/common.proto index 54437b305469..2ebf2ae17b6d 100644 --- a/api/envoy/service/tap/v2alpha/common.proto +++ b/api/envoy/service/tap/v2alpha/common.proto @@ -1,30 +1,32 @@ syntax = "proto3"; -import "envoy/api/v2/route/route.proto"; +package envoy.service.tap.v2alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; + import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; +import "envoy/api/v2/route/route.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -package envoy.service.tap.v2alpha; - -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; - // [#protodoc-title: Common tap configuration] // Tap configuration. message TapConfig { + // [#comment:TODO(mattklein123): Rate limiting] + // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message.required = true]; + MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message.required = true]; + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Specify if Tap matching is enabled. 
The % of requests\connections for // which the tap matching is enabled. When not enabled, the request\connection will not be @@ -34,9 +36,7 @@ message TapConfig { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v2.core.RuntimeFractionalPercent tap_enabled = 3; - - // [#comment:TODO(mattklein123): Rate limiting] + api.v2.core.RuntimeFractionalPercent tap_enabled = 3; } // Tap match configuration. This is a recursive structure which allows complex nested match @@ -45,7 +45,7 @@ message MatchPredicate { // A set of match configurations used for logical operations. message MatchSet { // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated .min_items = 2]; + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { @@ -63,7 +63,7 @@ message MatchPredicate { MatchPredicate not_match = 3; // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool.const = true]; + bool any_match = 4 [(validate.rules).bool = {const: true}]; // HTTP request headers match configuration. HttpHeadersMatch http_request_headers_match = 5; @@ -89,7 +89,7 @@ message HttpHeadersMatch { message OutputConfig { // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}]; + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; // For buffered tapping, the maximum amount of received body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated @@ -153,7 +153,7 @@ message OutputSink { } // Sink output format. 
- Format format = 1 [(validate.rules).enum.defined_only = true]; + Format format = 1 [(validate.rules).enum = {defined_only: true}]; oneof output_sink_type { option (validate.required) = true; @@ -186,7 +186,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC @@ -196,5 +196,5 @@ message StreamingGrpcSink { string tap_id = 1; // The gRPC server that hosts the Tap Sink Service. - envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/service/tap/v3alpha/common.proto b/api/envoy/service/tap/v3alpha/common.proto index 7c375d913d7a..2302e64b7568 100644 --- a/api/envoy/service/tap/v3alpha/common.proto +++ b/api/envoy/service/tap/v3alpha/common.proto @@ -1,30 +1,32 @@ syntax = "proto3"; -import "envoy/api/v3alpha/route/route.proto"; +package envoy.service.tap.v3alpha; + +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.service.tap.v3alpha"; + import "envoy/api/v3alpha/core/base.proto"; import "envoy/api/v3alpha/core/grpc_service.proto"; +import "envoy/api/v3alpha/route/route.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -package envoy.service.tap.v3alpha; - -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.service.tap.v3alpha"; - // [#protodoc-title: Common tap configuration] // Tap configuration. 
message TapConfig { + // [#comment:TODO(mattklein123): Rate limiting] + // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message.required = true]; + MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message.required = true]; + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for // which the tap matching is enabled. When not enabled, the request\connection will not be @@ -34,9 +36,7 @@ message TapConfig { // // This field defaults to 100/:ref:`HUNDRED // `. - envoy.api.v3alpha.core.RuntimeFractionalPercent tap_enabled = 3; - - // [#comment:TODO(mattklein123): Rate limiting] + api.v3alpha.core.RuntimeFractionalPercent tap_enabled = 3; } // Tap match configuration. This is a recursive structure which allows complex nested match @@ -45,7 +45,7 @@ message MatchPredicate { // A set of match configurations used for logical operations. message MatchSet { // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated .min_items = 2]; + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { @@ -63,7 +63,7 @@ message MatchPredicate { MatchPredicate not_match = 3; // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool.const = true]; + bool any_match = 4 [(validate.rules).bool = {const: true}]; // HTTP request headers match configuration. 
HttpHeadersMatch http_request_headers_match = 5; @@ -89,7 +89,7 @@ message HttpHeadersMatch { message OutputConfig { // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}]; + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; // For buffered tapping, the maximum amount of received body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated @@ -153,7 +153,7 @@ message OutputSink { } // Sink output format. - Format format = 1 [(validate.rules).enum.defined_only = true]; + Format format = 1 [(validate.rules).enum = {defined_only: true}]; oneof output_sink_type { option (validate.required) = true; @@ -186,7 +186,7 @@ message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string.min_bytes = 1]; + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC @@ -196,5 +196,5 @@ message StreamingGrpcSink { string tap_id = 1; // The gRPC server that hosts the Tap Sink Service. 
- envoy.api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; + api.v3alpha.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/type/http_status.proto b/api/envoy/type/http_status.proto index 3f89ada09776..acde59c49eb2 100644 --- a/api/envoy/type/http_status.proto +++ b/api/envoy/type/http_status.proto @@ -18,68 +18,118 @@ enum StatusCode { Continue = 100; OK = 200; + Created = 201; + Accepted = 202; + NonAuthoritativeInformation = 203; + NoContent = 204; + ResetContent = 205; + PartialContent = 206; + MultiStatus = 207; + AlreadyReported = 208; + IMUsed = 226; MultipleChoices = 300; + MovedPermanently = 301; + Found = 302; + SeeOther = 303; + NotModified = 304; + UseProxy = 305; + TemporaryRedirect = 307; + PermanentRedirect = 308; BadRequest = 400; + Unauthorized = 401; + PaymentRequired = 402; + Forbidden = 403; + NotFound = 404; + MethodNotAllowed = 405; + NotAcceptable = 406; + ProxyAuthenticationRequired = 407; + RequestTimeout = 408; + Conflict = 409; + Gone = 410; + LengthRequired = 411; + PreconditionFailed = 412; + PayloadTooLarge = 413; + URITooLong = 414; + UnsupportedMediaType = 415; + RangeNotSatisfiable = 416; + ExpectationFailed = 417; + MisdirectedRequest = 421; + UnprocessableEntity = 422; + Locked = 423; + FailedDependency = 424; + UpgradeRequired = 426; + PreconditionRequired = 428; + TooManyRequests = 429; + RequestHeaderFieldsTooLarge = 431; InternalServerError = 500; + NotImplemented = 501; + BadGateway = 502; + ServiceUnavailable = 503; + GatewayTimeout = 504; + HTTPVersionNotSupported = 505; + VariantAlsoNegotiates = 506; + InsufficientStorage = 507; + LoopDetected = 508; + NotExtended = 510; + NetworkAuthenticationRequired = 511; } // HTTP status. message HttpStatus { // Supplies HTTP response code. 
- StatusCode code = 1 - [(validate.rules).enum = {not_in: [0]}, (validate.rules).enum.defined_only = true]; + StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; } diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto index 56b69eae5968..d2936a5c4317 100644 --- a/api/envoy/type/matcher/metadata.proto +++ b/api/envoy/type/matcher/metadata.proto @@ -79,16 +79,16 @@ message MetadataMatcher { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string.min_bytes = 1]; + string key = 1 [(validate.rules).string = {min_bytes: 1}]; } } // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string.min_bytes = 1]; + string filter = 1 [(validate.rules).string = {min_bytes: 1}]; // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated .min_items = 1]; + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The MetadataMatcher is matched if the value retrieved by path is matched to this value. - ValueMatcher value = 3 [(validate.rules).message.required = true]; + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/type/matcher/number.proto b/api/envoy/type/matcher/number.proto index 5c8cec7bcbdc..09eb811606aa 100644 --- a/api/envoy/type/matcher/number.proto +++ b/api/envoy/type/matcher/number.proto @@ -19,7 +19,7 @@ message DoubleMatcher { // If specified, the input double value must be in the range specified here. // Note: The range is using half-open interval semantics [start, end). - envoy.type.DoubleRange range = 1; + DoubleRange range = 1; // If specified, the input double value must be equal to the value specified here. 
double exact = 2; diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index cf6343c9ac51..98819364d9e2 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -7,6 +7,7 @@ option java_multiple_files = true; option java_package = "io.envoyproxy.envoy.type.matcher"; import "google/protobuf/wrappers.proto"; + import "validate/validate.proto"; // [#protodoc-title: RegexMatcher] @@ -28,9 +29,9 @@ message RegexMatcher { option (validate.required) = true; // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message.required = true]; + GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string.min_bytes = 1]; + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; } diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto index 986e393be154..f926af343fd4 100644 --- a/api/envoy/type/matcher/string.proto +++ b/api/envoy/type/matcher/string.proto @@ -30,7 +30,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string.min_bytes = 1]; + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -38,7 +38,7 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string.min_bytes = 1]; + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; // The input string must match the regular expression specified here. 
// The regex grammar is defined `here @@ -46,21 +46,21 @@ message StringMatcher { // // Examples: // - // * The regex *\d{3}* matches the value *123* - // * The regex *\d{3}* does not match the value *1234* - // * The regex *\d{3}* does not match the value *123.456* + // * The regex ``\d{3}`` matches the value *123* + // * The regex ``\d{3}`` does not match the value *1234* + // * The regex ``\d{3}`` does not match the value *123.456* // // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. - string regex = 4 [(validate.rules).string.max_bytes = 1024, deprecated = true]; + string regex = 4 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message.required = true]; + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } } // Specifies a list of ways to match a string. message ListStringMatcher { - repeated StringMatcher patterns = 1 [(validate.rules).repeated .min_items = 1]; + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; } diff --git a/api/envoy/type/percent.proto b/api/envoy/type/percent.proto index c577093eea0a..6d0868fd0ede 100644 --- a/api/envoy/type/percent.proto +++ b/api/envoy/type/percent.proto @@ -12,7 +12,7 @@ import "validate/validate.proto"; // Identifies a percentage, in the range [0.0, 100.0]. message Percent { - double value = 1 [(validate.rules).double = {gte: 0, lte: 100}]; + double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; } // A fractional percentage is used in cases in which for performance reasons performing floating @@ -22,9 +22,6 @@ message Percent { // * **Example**: 1/100 = 1%. // * **Example**: 3/10000 = 0.03%. message FractionalPercent { - // Specifies the numerator. Defaults to 0. 
- uint32 numerator = 1; - // Fraction percentages support several fixed denominator values. enum DenominatorType { // 100. @@ -43,7 +40,10 @@ message FractionalPercent { MILLION = 2; } + // Specifies the numerator. Defaults to 0. + uint32 numerator = 1; + // Specifies the denominator. If the denominator specified is less than the numerator, the final // fractional percentage is capped at 1 (100%). - DenominatorType denominator = 2 [(validate.rules).enum.defined_only = true]; + DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/test/build/go_build_test.go b/api/test/build/go_build_test.go index c5c15becff35..638ef478b8c7 100644 --- a/api/test/build/go_build_test.go +++ b/api/test/build/go_build_test.go @@ -3,14 +3,14 @@ package go_build_test import ( "testing" - _ "github.com/envoyproxy/data-plane-api/api/envoy/api/v2" - _ "github.com/envoyproxy/data-plane-api/api/envoy/api/v2/auth" - _ "github.com/envoyproxy/data-plane-api/api/envoy/config/bootstrap/v2" - _ "github.com/envoyproxy/data-plane-api/api/envoy/service/accesslog/v2" - _ "github.com/envoyproxy/data-plane-api/api/envoy/service/discovery/v2" - _ "github.com/envoyproxy/data-plane-api/api/envoy/service/metrics/v2" - _ "github.com/envoyproxy/data-plane-api/api/envoy/service/ratelimit/v2" - _ "github.com/envoyproxy/data-plane-api/api/envoy/service/trace/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/api/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" + _ "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + _ "github.com/envoyproxy/go-control-plane/envoy/service/trace/v2" ) func TestNoop(t *testing.T) { diff --git a/bazel/envoy_binary.bzl 
b/bazel/envoy_binary.bzl index 8a9d396dcfbc..9c05b488716f 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -19,7 +19,8 @@ def envoy_cc_binary( repository = "", stamped = False, deps = [], - linkopts = []): + linkopts = [], + tags = []): if not linkopts: linkopts = _envoy_linkopts() if stamped: @@ -38,6 +39,7 @@ def envoy_cc_binary( malloc = tcmalloc_external_dep(repository), stamp = 1, deps = deps, + tags = tags, ) # Select the given values if exporting is enabled in the current build. diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 73fdff21dc2d..9e10b95e8b8c 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -93,6 +93,7 @@ def envoy_cc_fuzz_test( repository + "//bazel:dynamic_stdlib", ], repository = repository, + tags = tags, **kwargs ) native.cc_test( @@ -155,9 +156,10 @@ def envoy_cc_test( coverage = True, local = False, size = "medium"): - test_lib_tags = [] if coverage: - test_lib_tags.append("coverage_test_lib") + coverage_tags = tags + ["coverage_test_lib"] + else: + coverage_tags = tags _envoy_cc_test_infrastructure_library( name = name + "_lib_internal_only", srcs = srcs, @@ -165,11 +167,13 @@ def envoy_cc_test( external_deps = external_deps, deps = deps + [repository + "//test/test_common:printers_includes"], repository = repository, - tags = test_lib_tags, + tags = coverage_tags, copts = copts, # Allow public visibility so these can be consumed in coverage tests in external projects. visibility = ["//visibility:public"], ) + if coverage: + coverage_tags = tags + ["coverage_test"] native.cc_test( name = name, copts = envoy_copts(repository, test = True) + copts, @@ -183,7 +187,7 @@ def envoy_cc_test( # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 # 2 - by default, mocks act as StrictMocks. 
args = args + ["--gmock_default_mock_behavior=2"], - tags = tags + ["coverage_test"], + tags = coverage_tags, local = local, shard_count = shard_count, size = size, @@ -254,6 +258,7 @@ def envoy_sh_test( srcs = [], data = [], coverage = True, + tags = [], **kargs): if coverage: test_runner_cc = name + "_test_runner.cc" @@ -268,7 +273,7 @@ def envoy_sh_test( name = name + "_lib", srcs = [test_runner_cc], data = srcs + data, - tags = ["coverage_test_lib"], + tags = tags + ["coverage_test_lib"], deps = ["//test/test_common:environment_lib"], ) native.sh_test( @@ -276,5 +281,6 @@ def envoy_sh_test( srcs = ["//bazel:sh_test_wrapper.sh"], data = srcs + data, args = srcs, + tags = tags, **kargs ) diff --git a/bazel/protobuf.patch b/bazel/protobuf.patch index d51b67e92457..3a26bb356f97 100644 --- a/bazel/protobuf.patch +++ b/bazel/protobuf.patch @@ -1,28 +1,8 @@ -diff --git a/src/google/protobuf/stubs/strutil.cc b/src/google/protobuf/stubs/strutil.cc -index 3844fa6b8b..5486887295 100644 ---- a/src/google/protobuf/stubs/strutil.cc -+++ b/src/google/protobuf/stubs/strutil.cc -@@ -1065,10 +1065,12 @@ char* FastUInt32ToBufferLeft(uint32 u, char* buffer) { - } - - char* FastInt32ToBufferLeft(int32 i, char* buffer) { -- uint32 u = i; -+ uint32 u = 0; - if (i < 0) { - *buffer++ = '-'; -- u = -i; -+ u -= i; -+ } else { -+ u = i; - } - return FastUInt32ToBufferLeft(u, buffer); - } - diff --git a/BUILD b/BUILD index 6665de94..55f28582 100644 --- a/BUILD +++ b/BUILD -@@ -19,6 +19,6 @@ config_setting( +@@ -19,7 +19,7 @@ config_setting( # ZLIB configuration ################################################################################ @@ -30,4 +10,4 @@ index 6665de94..55f28582 100644 +ZLIB_DEPS = ["//external:zlib"] ################################################################################ - # Protobuf Runtime Library \ No newline at end of file + # Protobuf Runtime Library diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 4f2a28fab99d..efc585e97d0a 100644 --- 
a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -107,8 +107,9 @@ def envoy_dependencies(skip_targets = []): if "envoy_build_config" not in native.existing_rules().keys(): _default_envoy_build_config(name = "envoy_build_config") - # Setup rules_foreign_cc + # Setup external Bazel rules _foreign_cc_dependencies() + _rules_proto_dependencies() # Binding to an alias pointing to the selected version of BoringSSL: # - BoringSSL FIPS from @boringssl_fips//:ssl, @@ -317,6 +318,12 @@ def _net_zlib(): actual = "@envoy//bazel/foreign_cc:zlib", ) + # Bind for grpc. + native.bind( + name = "madler_zlib", + actual = "@envoy//bazel/foreign_cc:zlib", + ) + def _com_google_cel_cpp(): _repository_impl("com_google_cel_cpp") @@ -412,6 +419,12 @@ def _com_google_absl(): name = "abseil_base", actual = "@com_google_absl//absl/base:base", ) + + # Bind for grpc. + native.bind( + name = "absl-base", + actual = "@com_google_absl//absl/base", + ) native.bind( name = "abseil_flat_hash_map", actual = "@com_google_absl//absl/container:flat_hash_map", @@ -480,6 +493,12 @@ def _com_google_absl(): actual = "@com_google_absl//absl/time:time", ) + # Bind for grpc. 
+ native.bind( + name = "absl-time", + actual = "@com_google_absl//absl/time:time", + ) + def _com_google_protobuf(): _repository_impl( "com_google_protobuf", @@ -709,6 +728,9 @@ def _com_github_gperftools_gperftools(): def _foreign_cc_dependencies(): _repository_impl("rules_foreign_cc") +def _rules_proto_dependencies(): + _repository_impl("rules_proto") + def _is_linux(ctxt): return ctxt.os.name == "linux" diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index ab0285435934..ffaca75a7862 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -93,10 +93,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/gperftools/gperftools/archive/fc00474ddc21fff618fc3f009b46590e241e425e.tar.gz"], ), com_github_grpc_grpc = dict( - sha256 = "bcb01ac7029a7fb5219ad2cbbc4f0a2df3ef32db42e236ce7814597f4b04b541", - strip_prefix = "grpc-79a8b5289e3122d2cea2da3be7151d37313d6f46", - # Commit from 2019-05-30 - urls = ["https://github.com/grpc/grpc/archive/79a8b5289e3122d2cea2da3be7151d37313d6f46.tar.gz"], + sha256 = "cce1d4585dd017980d4a407d8c5e9f8fc8c1dbb03f249b99e88a387ebb45a035", + strip_prefix = "grpc-1.22.1", + urls = ["https://github.com/grpc/grpc/archive/v1.22.1.tar.gz"], ), com_github_luajit_luajit = dict( sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", @@ -210,9 +209,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/googletest/archive/d7003576dd133856432e2e07340f45926242cc3a.tar.gz"], ), com_google_protobuf = dict( - sha256 = "b7220b41481011305bf9100847cf294393973e869973a9661046601959b2960b", - strip_prefix = "protobuf-3.8.0", - urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.8.0/protobuf-all-3.8.0.tar.gz"], + sha256 = "3040a5b946d9df7aa89c0bf6981330bf92b7844fd90e71b61da0c721e421a421", + strip_prefix = "protobuf-3.9.1", + urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.9.1/protobuf-all-3.9.1.tar.gz"], ), 
grpc_httpjson_transcoding = dict( sha256 = "a447458b47ea4dc1d31499f555769af437c5d129d988ec1e13d5fdd0a6a36b4e", @@ -225,20 +224,29 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.19.2/rules_go-0.19.2.tar.gz"], ), rules_foreign_cc = dict( - sha256 = "c957e6663094a1478c43330c1bbfa71afeaf1ab86b7565233783301240c7a0ab", - strip_prefix = "rules_foreign_cc-a209b642c7687a8894c19b3dd40e43e6d3f38e83", - # 2019-07-17 - urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/a209b642c7687a8894c19b3dd40e43e6d3f38e83.tar.gz"], + sha256 = "ea7e4f13b66009d46c01a0292cf1d590d8ea06775c315263abb66022dde25315", + strip_prefix = "rules_foreign_cc-16ddc00bd4e1b3daf3faee1605a168f5283326fa", + # 2019-09-18 + urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/16ddc00bd4e1b3daf3faee1605a168f5283326fa.tar.gz"], + ), + rules_proto = dict( + sha256 = "602e7161d9195e50246177e7c55b2f39950a9cf7366f74ed5f22fd45750cd208", + strip_prefix = "rules_proto-97d8af4dc474595af3900dd85cb3a29ad28cc313", + # 2019-08-02 + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/97d8af4dc474595af3900dd85cb3a29ad28cc313.tar.gz", + "https://github.com/bazelbuild/rules_proto/archive/97d8af4dc474595af3900dd85cb3a29ad28cc313.tar.gz", + ], ), six_archive = dict( sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a", urls = ["https://files.pythonhosted.org/packages/b3/b2/238e2590826bfdd113244a40d9d3eb26918bd798fc187e2360a8367068db/six-1.10.0.tar.gz"], ), io_opencensus_cpp = dict( - sha256 = "145e42594db358905737dc07400657be62a2961f4e93ab7f4c9765dd2441033c", - strip_prefix = "opencensus-cpp-cc198ff64569bc47beed5384777a4bb563d268e7", - # 2019-09-04 - urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/cc198ff64569bc47beed5384777a4bb563d268e7.tar.gz"], + sha256 = "29b2be0d92523a20daef7045e547c517ca0f6e6a0ddd7e7fcd15d162c13b6d9a", + strip_prefix = 
"opencensus-cpp-1bbde06b72e0516b74b6992d204f254073047b10", + # 2019-09-17 + urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/1bbde06b72e0516b74b6992d204f254073047b10.tar.gz"], ), com_github_curl = dict( sha256 = "4376ac72b95572fb6c4fbffefb97c7ea0dd083e1974c0e44cd7e49396f454839", diff --git a/ci/do_ci.sh b/ci/do_ci.sh index f94d96fc8cdd..95722aa6b23b 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -260,17 +260,39 @@ elif [[ "$CI_TARGET" == "bazel.fuzz" ]]; then echo "Building envoy fuzzers and executing 100 fuzz iterations..." bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} --test_arg="-runs=10" exit 0 +elif [[ "$CI_TARGET" == "bazel.fuzzit_regression" ]]; then + setup_clang_toolchain + FUZZ_TEST_TARGETS="$(bazel query "attr('tags','fuzzer',${TEST_TARGETS})")" + echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS}" + echo "Building fuzzers and run a regression with corpus from Fuzzit" + bazel_with_collection build ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} + ./ci/run_fuzzit.sh local-regression + exit 0 +elif [[ "$CI_TARGET" == "bazel.fuzzit_fuzzing" ]]; then + setup_clang_toolchain + FUZZ_TEST_TARGETS="$(bazel query "attr('tags','fuzzer',${TEST_TARGETS})")" + echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS}" + echo "Build fuzzers and push them to Fuzzit servers for continuous fuzzing" + bazel_with_collection build ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} + ./ci/run_fuzzit.sh fuzzing + exit 0 elif [[ "$CI_TARGET" == "fix_format" ]]; then + # proto_format.sh needs to build protobuf. + setup_clang_toolchain echo "fix_format..." ./tools/check_format.py fix ./tools/format_python_tools.sh fix + ./tools/proto_format.sh fix exit 0 elif [[ "$CI_TARGET" == "check_format" ]]; then + # proto_format.sh needs to build protobuf. + setup_clang_toolchain echo "check_format_test..." 
./tools/check_format_test_helper.py --log=WARN echo "check_format..." ./tools/check_format.py check ./tools/format_python_tools.sh check + ./tools/proto_format.sh check exit 0 elif [[ "$CI_TARGET" == "check_repositories" ]]; then echo "check_repositories..." diff --git a/ci/go_mirror.sh b/ci/go_mirror.sh new file mode 100755 index 000000000000..80be4cc0b532 --- /dev/null +++ b/ci/go_mirror.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -e + +if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] +then + tools/api/generate_go_protobuf.py +fi diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index ede21c5de859..89f2cb6db41b 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -23,9 +23,9 @@ USER_GROUP=root mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ - -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build ${GIT_VOLUME_OPTION} \ + -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock ${GIT_VOLUME_OPTION} \ -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE \ - -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE \ + -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${IMAGE_NAME}":"${IMAGE_ID}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ --home-dir /source envoybuild && usermod -a -G pcap envoybuild && su envoybuild -c \"cd source && $*\"" diff --git a/ci/run_fuzzit.sh b/ci/run_fuzzit.sh new file mode 100755 index 000000000000..fcbb5120eb94 --- /dev/null +++ b/ci/run_fuzzit.sh @@ -0,0 +1,33 @@ +#!/bin/bash -eux + +# Dynamically source fuzzing 
targets +declare -r FUZZER_TARGETS_CC=$(find . -name *_fuzz_test.cc) +declare -r FUZZER_TARGETS="$(for t in ${FUZZER_TARGETS_CC}; do echo "${t:2:-3}"; done)" + +declare BAZEL_BUILD_TARGETS="" +for t in ${FUZZER_TARGETS} +do + declare BAZEL_PATH="//"$(dirname "$t")":"$(basename "$t") + declare TAGGED=$(bazel query "attr('tags', 'no_fuzz', ${BAZEL_PATH})") + if [ -z "${TAGGED}" ] + then + FILTERED_FUZZER_TARGETS+="$t " + fi +done + + +# run fuzzing regression or upload to Fuzzit for long running fuzzing job ($1 is either local-regression or fuzzing) +wget -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.57/fuzzit_Linux_x86_64 +chmod a+x fuzzit + +PREFIX=$(realpath /build/tmp/_bazel_bazel/*/execroot/envoy/bazel-out/k8-fastbuild/bin) +for t in ${FILTERED_FUZZER_TARGETS} +do + TARGET_BASE="$(expr "$t" : '.*/\(.*\)_fuzz_test')" + # Fuzzit target names can't contain underscore + FUZZIT_TARGET_NAME=${TARGET_BASE//_/-} + if [ $1 == "fuzzing" ]; then + ./fuzzit create target --skip-if-exists --public-corpus envoyproxy/"${FUZZIT_TARGET_NAME}" + fi + ./fuzzit create job --skip-if-not-exists --type $1 envoyproxy/"${FUZZIT_TARGET_NAME}" "${PREFIX}"/"${t}"_with_libfuzzer +done diff --git a/docs/build.sh b/docs/build.sh index a2c64b123f59..8a43163f6c56 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -46,26 +46,40 @@ mkdir -p "${GENERATED_RST_DIR}" source_venv "$BUILD_DIR" pip3 install -r "${SCRIPT_DIR}"/requirements.txt +# Clean up any stale files in the API tree output. Bazel remembers valid cached +# files still. 
+rm -rf bazel-bin/external/envoy_api + bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED=1 \ --action_env=ENVOY_BLOB_SHA --spawn_strategy=standalone --host_force_python=PY3 -declare -r DOCS_DEPS=$(bazel query "labels(deps, @envoy_api//docs:protos)") +# We do ** matching below to deal with Bazel cache blah (source proto artifacts +# are nested inside source package targets). +shopt -s globstar + +# Find all source protos. +declare -r PROTO_TARGET=$(bazel query "labels(srcs, labels(deps, @envoy_api//docs:protos))") # Only copy in the protos we care about and know how to deal with in protodoc. -for PROTO_TARGET in ${DOCS_DEPS} +for p in ${PROTO_TARGET} do - for p in $(bazel query "labels(srcs, ${PROTO_TARGET})" ) - do - declare PROTO_TARGET_WITHOUT_PREFIX="${PROTO_TARGET#@envoy_api//}" - declare PROTO_TARGET_CANONICAL="${PROTO_TARGET_WITHOUT_PREFIX/://}" - declare PROTO_FILE_WITHOUT_PREFIX="${p#@envoy_api//}" - declare PROTO_FILE_CANONICAL="${PROTO_FILE_WITHOUT_PREFIX/://}" - declare DEST="${GENERATED_RST_DIR}/api-v2/${PROTO_FILE_CANONICAL#envoy/}".rst - mkdir -p "$(dirname "${DEST}")" - cp -f bazel-bin/external/envoy_api/"${PROTO_TARGET_CANONICAL}/${PROTO_FILE_CANONICAL}.rst" "$(dirname "${DEST}")" - [ -n "${CPROFILE_ENABLED}" ] && cp -f bazel-bin/"${p}".profile "$(dirname "${DEST}")" - done + declare PROTO_FILE_WITHOUT_PREFIX="${p#@envoy_api//}" + declare PROTO_FILE_CANONICAL="${PROTO_FILE_WITHOUT_PREFIX/://}" + # We use ** glob matching here to deal with the fact that we have something + # like + # bazel-bin/external/envoy_api/envoy/admin/v2alpha/pkg/envoy/admin/v2alpha/certs.proto.proto + # and we don't want to have to do a nested loop and slow bazel query to + # recover the canonical package part of the path. 
+ declare SRCS=(bazel-bin/external/envoy_api/**/"${PROTO_FILE_CANONICAL}.rst") + # While we may have reformatted the file multiple times due to the transitive + # dependencies in the aspect above, they all look the same. So, just pick an + # arbitrary match and we're done. + declare SRC="${SRCS[0]}" + declare DST="${GENERATED_RST_DIR}/api-v2/${PROTO_FILE_CANONICAL#envoy/}".rst + + mkdir -p "$(dirname "${DST}")" + cp -f "${SRC}" "$(dirname "${DST}")" done mkdir -p ${GENERATED_RST_DIR}/api-docs diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index 03eea44cb751..5686bf3b4f45 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -32,6 +32,26 @@ Every listener has a statistics tree rooted at *listener.
.* with the fo ssl.sigalgs., Counter, Total successful TLS connections that used signature algorithm ssl.versions., Counter, Total successful TLS connections that used protocol version +.. _config_listener_stats_per_handler: + +Per-handler Listener Stats +-------------------------- + +Every listener additionally has a statistics tree rooted at *listener.
..* which +contains *per-handler* statistics. As described in the +:ref:`threading model ` documentation, Envoy has a threading model which +includes the *main thread* as well as a number of *worker threads* which are controlled by the +:option:`--concurrency` option. Along these lines, ** is equal to *main_thread*, +*worker_0*, *worker_1*, etc. These statistics can be used to look for per-handler/worker imbalance +on either accepted or active connections. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_cx_total, Counter, Total connections on this handler. + downstream_cx_active, Gauge, Total active connections on this handler. + Listener manager ---------------- diff --git a/docs/root/configuration/observability/access_log.rst b/docs/root/configuration/observability/access_log.rst index 9350cc5fb522..7ec4b57f0800 100644 --- a/docs/root/configuration/observability/access_log.rst +++ b/docs/root/configuration/observability/access_log.rst @@ -274,6 +274,26 @@ The following command operators are supported: :ref:`proxy proto ` or :ref:`x-forwarded-for `. +%DOWNSTREAM_DIRECT_REMOTE_ADDRESS% + Direct remote address of the downstream connection. If the address is an IP address it includes both + address and port. + + .. note:: + + This is always the physical remote address of the peer even if the downstream remote address has + been inferred from :ref:`proxy proto ` + or :ref:`x-forwarded-for `. + +%DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT% + The direct remote address of the downstream connection. If the address is an IP address the output does + *not* include port. + + .. note:: + + This is always the physical remote address of the peer even if the downstream remote address has + been inferred from :ref:`proxy proto ` + or :ref:`x-forwarded-for `. + %DOWNSTREAM_LOCAL_ADDRESS% Local address of the downstream connection. If the address is an IP address it includes both address and port. 
diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index 6c9f81b84d47..47ebcfe7400c 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -200,7 +200,7 @@ message as a `google.protobuf.Struct modeling a JSON object with the following rules: * Dot separators map to tree edges. -* Scalar leaves (integer, strings, booleans) are represented with their respective JSON type. +* Scalar leaves (integer, strings, booleans, doubles) are represented with their respective JSON type. * :ref:`FractionalPercent ` is represented with via its `canonical JSON encoding `_. @@ -211,6 +211,10 @@ An example representation of a setting for the *health_check.min_interval* key i health_check: min_interval: 5 +.. note:: + + Integer values that are parsed from doubles are rounded down to the nearest whole number. + .. _config_runtime_comments: Comments @@ -238,7 +242,8 @@ In the second phase the message and filename will be added to :repo:`runtime_features.cc ` and use of that configuration field will cause the config to be rejected by default. This fail-by-default mode can be overridden in runtime configuration by setting -envoy.deprecated_features.filename.proto:fieldname to true. For example, for a deprecated field +envoy.deprecated_features.filename.proto:fieldname or envoy.deprecated_features.filename.proto:enum_value +to true. For example, for a deprecated field ``Foo.Bar.Eep`` in ``baz.proto`` set ``envoy.deprecated_features.baz.proto:Eep`` to ``true``. Use of this override is **strongly discouraged**. Fatal-by-default configuration indicates that the removal of the old code paths is imminent. 
It is diff --git a/docs/root/install/tools/route_table_check_tool.rst b/docs/root/install/tools/route_table_check_tool.rst index 186df20b130e..83f094dfd6c4 100644 --- a/docs/root/install/tools/route_table_check_tool.rst +++ b/docs/root/install/tools/route_table_check_tool.rst @@ -48,7 +48,7 @@ Usage --covall Enables comprehensive code coverage percent calculation taking into account all the possible - asserts. + asserts. Displays missing tests. --disable-deprecation-check Disables the deprecation check for RouteConfiguration proto. @@ -65,7 +65,7 @@ Output The program exits with status EXIT_FAILURE if any test case does not match the expected route parameter value. - If a test fails, details of the failed test cases are printed if ``-details`` flag is provided. + If a test fails, details of the failed test cases are printed if ``--details`` flag is provided. The first field is the expected route parameter value. The second field is the actual route parameter value. The third field indicates the parameter that is compared. In the following example, Test_2 and Test_5 failed while the other tests diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index 62b3536c75ff..969be0791e53 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -4,6 +4,7 @@ Version history 1.12.0 (pending) ================ * access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second. +* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. * access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. * access log: reintroduce :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes * admin: added ability to configure listener :ref:`socket options `. 
@@ -22,15 +23,19 @@ Version history * config: changed the default value of :ref:`initial_fetch_timeout ` from 0s to 15s. This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. Refer to :ref:`initialization process ` for more details. * config: added stat :ref:`init_fetch_timeout `. * ext_authz: added :ref:`configurable ability ` to send dynamic metadata to the `ext_authz` service. +* ext_authz: added tracing to the HTTP client. * fault: added overrides for default runtime keys in :ref:`HTTPFault ` filter. * grpc: added :ref:`AWS IAM grpc credentials extension ` for AWS-managed xDS. * grpc-json: added support for :ref:`ignoring unknown query parameters`. +* grpc-json: added support for :ref:`the grpc-status-details-bin header`. * header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. * http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature `envoy.reloadable_features.strict_header_validation`. * http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. Guarded by `envoy.reloadable_features.trusted_forwarded_proto` which defaults true. * http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation` field. * http: added the ability to :ref:`merge adjacent slashes` in the path. +* http: :ref:`AUTO ` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client. * http: remove h2c upgrade headers for HTTP/1 as h2c upgrades are currently not supported. +* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url ` to false. 
* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. * listeners: added :ref:`HTTP inspector listener filter `. * lua: extended `httpCall()` and `respond()` APIs to accept headers with entry values that can be a string or table of strings. @@ -54,7 +59,12 @@ Version history * router check tool: add comprehensive coverage reporting. * router check tool: add deprecated field check. * router check tool: add flag for only printing results of failed tests. +* router check tool: add support for outputting missing tests in the detailed coverage report. +* runtime: allow for the ability to parse integers as double values and vice-versa. * server: added a post initialization lifecycle event, in addition to the existing startup and shutdown events. +* server: added :ref:`per-handler listener stats ` and + :ref:`per-worker watchdog stats ` to help diagnosing event + loop imbalance and general performance issues. * thrift_proxy: fix crashing bug on invalid transport/protocol framing * tls: added verification of IP address SAN fields in certificates against configured SANs in the * tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst index d7066374f3ed..555e7a03b5eb 100644 --- a/docs/root/operations/performance.rst +++ b/docs/root/operations/performance.rst @@ -34,8 +34,8 @@ to true. the wire individually because the statsd protocol doesn't have any way to represent a histogram summary. Be aware that this can be a very large volume of data. 
-Statistics ----------- +Event loop statistics +--------------------- The event dispatcher for the main thread has a statistics tree rooted at *server.dispatcher.*, and the event dispatcher for each worker thread has a statistics tree rooted at @@ -49,3 +49,24 @@ the event dispatcher for each worker thread has a statistics tree rooted at poll_delay_us, Histogram, Polling delays in microseconds Note that any auxiliary threads are not included here. + +.. _operations_performance_watchdog: + +Watchdog +-------- + +In addition to event loop statistics, Envoy also include a configurable +:ref:`watchdog ` system that can increment +statistics when Envoy is not responsive and optionally kill the server. The statistics are useful +for understanding at a high level whether Envoy's event loop is not responsive either because it is +doing too much work, blocking, or not being scheduled by the OS. + +The watchdog emits statistics in both the *server.* and *server..* trees. +** is equal to *main_thread*, *worker_0*, *worker_1*, etc. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + watchdog_miss, Counter, Number of standard misses + watchdog_mega_miss, Counter, Number of mega misses diff --git a/include/envoy/api/BUILD b/include/envoy/api/BUILD index bade7600a3eb..0bdbc5a87588 100644 --- a/include/envoy/api/BUILD +++ b/include/envoy/api/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/filesystem:filesystem_interface", + "//include/envoy/server:process_context_interface", "//include/envoy/thread:thread_interface", ], ) diff --git a/include/envoy/api/api.h b/include/envoy/api/api.h index 8e2a019777b5..85b3ae8b81dd 100644 --- a/include/envoy/api/api.h +++ b/include/envoy/api/api.h @@ -6,6 +6,7 @@ #include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/filesystem/filesystem.h" +#include "envoy/server/process_context.h" #include "envoy/stats/store.h" #include "envoy/thread/thread.h" @@ -52,6 +53,11 @@ class Api { * @return a constant reference to the root Stats::Scope */ virtual const Stats::Scope& rootScope() PURE; + + /** + * @return an optional reference to the ProcessContext + */ + virtual OptProcessContextRef processContext() PURE; }; using ApiPtr = std::unique_ptr; diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 468cf17fa967..1e29bf5cd953 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -181,7 +181,7 @@ class Instance { virtual uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) PURE; /** - * Search for an occurrence of a buffer within the larger buffer. + * Search for an occurrence of data within the buffer. * @param data supplies the data to search for. * @param size supplies the length of the data to search for. * @param start supplies the starting index to search from. 
@@ -189,6 +189,13 @@ class Instance { */ virtual ssize_t search(const void* data, uint64_t size, size_t start) const PURE; + /** + * Search for an occurrence of data at the start of a buffer. + * @param data supplies the data to search for. + * @return true if this buffer starts with data, false otherwise. + */ + virtual bool startsWith(absl::string_view data) const PURE; + /** * Constructs a flattened string from a buffer. * @return the flattened string. diff --git a/include/envoy/network/connection_handler.h b/include/envoy/network/connection_handler.h index 3ea5df65d84c..5f7a9d0fd4df 100644 --- a/include/envoy/network/connection_handler.h +++ b/include/envoy/network/connection_handler.h @@ -9,8 +9,6 @@ #include "envoy/network/listener.h" #include "envoy/ssl/context.h" -#include "spdlog/spdlog.h" - namespace Envoy { namespace Network { @@ -84,6 +82,11 @@ class ConnectionHandler { */ virtual void enableListeners() PURE; + /** + * @return the stat prefix used for per-handler stats. + */ + virtual const std::string& statPrefix() PURE; + /** * Used by ConnectionHandler to manage listeners. */ @@ -95,10 +98,12 @@ class ConnectionHandler { * @return the tag value as configured. */ virtual uint64_t listenerTag() PURE; + /** * @return the actual Listener object. */ virtual Listener* listener() PURE; + /** * Destroy the actual Listener it wraps. */ @@ -111,8 +116,7 @@ class ConnectionHandler { using ConnectionHandlerPtr = std::unique_ptr; /** - * A registered factory interface to create different kinds of - * ActiveUdpListener. + * A registered factory interface to create different kinds of ActiveUdpListener. */ class ActiveUdpListenerFactory { public: @@ -123,16 +127,18 @@ class ActiveUdpListenerFactory { * according to given config. * @param parent is the owner of the created ActiveListener objects. * @param dispatcher is used to create actual UDP listener. - * @param logger might not need to be passed in. 
- * TODO(danzh): investigate if possible to use statically defined logger in ActiveUdpListener - * implementation instead. * @param config provides information needed to create ActiveUdpListener and * UdpListener objects. * @return the ActiveUdpListener created. */ virtual ConnectionHandler::ActiveListenerPtr createActiveUdpListener(ConnectionHandler& parent, Event::Dispatcher& disptacher, - spdlog::logger& logger, Network::ListenerConfig& config) const PURE; + Network::ListenerConfig& config) const PURE; + + /** + * @return true if the UDP passing through listener doesn't form stateful connections. + */ + virtual bool isTransportConnectionless() const PURE; }; using ActiveUdpListenerFactoryPtr = std::unique_ptr; diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index f850ec18fde4..e4ca1ed18a7d 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -54,6 +54,7 @@ class Snapshot { struct Entry { std::string raw_string_value_; absl::optional uint_value_; + absl::optional double_value_; absl::optional fractional_percent_value_; absl::optional bool_value_; }; @@ -186,7 +187,8 @@ class Snapshot { virtual const std::string& get(const std::string& key) const PURE; /** - * Fetch an integer runtime key. + * Fetch an integer runtime key. Runtime keys larger than ~2^53 may not be accurately converted + * into integers and will return default_value. * @param key supplies the key to fetch. * @param default_value supplies the value to return if the key does not exist or it does not * contain an integer. @@ -194,6 +196,15 @@ class Snapshot { */ virtual uint64_t getInteger(const std::string& key, uint64_t default_value) const PURE; + /** + * Fetch a double runtime key. + * @param key supplies the key to fetch. + * @param default_value supplies the value to return if the key does not exist or it does not + * contain a double. + * @return double the runtime value or the default value. 
+ */ + virtual double getDouble(const std::string& key, double default_value) const PURE; + /** * Fetch the OverrideLayers that provide values in this snapshot. Layers are ordered from bottom * to top; for instance, the second layer's entries override the first layer's entries, and so on. diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index a6286ef4640b..8904e2bcc69e 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -180,10 +180,10 @@ class FactoryContext : public virtual CommonFactoryContext { virtual Grpc::Context& grpcContext() PURE; /** - * @return absl::optional> an optional reference to the + * @return OptProcessContextRef an optional reference to the * process context. Will be unset when running in validation mode. */ - virtual absl::optional> processContext() PURE; + virtual OptProcessContextRef processContext() PURE; }; class ListenerFactoryContext : public virtual FactoryContext { diff --git a/include/envoy/server/guarddog.h b/include/envoy/server/guarddog.h index 4386aa7f9051..08b8f53646a0 100644 --- a/include/envoy/server/guarddog.h +++ b/include/envoy/server/guarddog.h @@ -27,8 +27,10 @@ class GuardDog { * stopWatching() method to remove it from the list of watched objects. * * @param thread_id a Thread::ThreadId containing the system thread id + * @param thread_name supplies the name of the thread which is used for per-thread miss stats. */ - virtual WatchDogSharedPtr createWatchDog(Thread::ThreadId thread_id) PURE; + virtual WatchDogSharedPtr createWatchDog(Thread::ThreadId thread_id, + const std::string& thread_name) PURE; /** * Tell the GuardDog to forget about this WatchDog. diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index d440c8cf4c3d..57305e28af47 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -196,7 +196,7 @@ class Instance { /** * @return the server-wide process context. 
*/ - virtual absl::optional> processContext() PURE; + virtual OptProcessContextRef processContext() PURE; /** * @return ThreadLocal::Instance& the thread local storage engine for the server. This is used to diff --git a/include/envoy/server/process_context.h b/include/envoy/server/process_context.h index cbba5ea1592b..5118db1bf53d 100644 --- a/include/envoy/server/process_context.h +++ b/include/envoy/server/process_context.h @@ -2,6 +2,8 @@ #include "envoy/common/pure.h" +#include "absl/types/optional.h" + namespace Envoy { /** @@ -26,4 +28,6 @@ class ProcessContext { virtual ProcessObject& get() const PURE; }; +using OptProcessContextRef = absl::optional>; + } // namespace Envoy diff --git a/include/envoy/server/worker.h b/include/envoy/server/worker.h index f412dc922235..887e39b2e111 100644 --- a/include/envoy/server/worker.h +++ b/include/envoy/server/worker.h @@ -91,9 +91,12 @@ class WorkerFactory { virtual ~WorkerFactory() = default; /** + * @param overload_manager supplies the server's overload manager. + * @param worker_name supplies the name of the worker, used for per-worker stats. * @return WorkerPtr a new worker. 
*/ - virtual WorkerPtr createWorker(OverloadManager& overload_manager) PURE; + virtual WorkerPtr createWorker(OverloadManager& overload_manager, + const std::string& worker_name) PURE; }; } // namespace Server diff --git a/security/email-templates.md b/security/email-templates.md index b4281894e26f..759ca15219ad 100644 --- a/security/email-templates.md +++ b/security/email-templates.md @@ -28,7 +28,7 @@ $PERSON (on behalf of the Envoy maintainers) ``` Subject: [CONFIDENTIAL] Further details on security release of Envoy $VERSION -To: envoy-announce@googlegroups.com +To: cncf-envoy-distributors-announce@lists.cncf.io Cc: envoy-security@googlegroups.com Hello Envoy Distributors, diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index 295c544aa76e..631c9d4f9be2 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -401,6 +401,15 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { return StreamInfo::Utility::formatDownstreamAddressNoPort( *stream_info.downstreamRemoteAddress()); }; + } else if (field_name == "DOWNSTREAM_DIRECT_REMOTE_ADDRESS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.downstreamDirectRemoteAddress()->asString(); + }; + } else if (field_name == "DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + return StreamInfo::Utility::formatDownstreamAddressNoPort( + *stream_info.downstreamDirectRemoteAddress()); + }; } else if (field_name == "REQUESTED_SERVER_NAME") { field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { if (!stream_info.requestedServerName().empty()) { diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index fd576b9b00e1..238b60b7dd15 100644 --- a/source/common/access_log/access_log_manager_impl.cc 
+++ b/source/common/access_log/access_log_manager_impl.cc @@ -123,7 +123,7 @@ void AccessLogFileImpl::flushThreadFunc() { // flush_event_ can be woken up either by large enough flush_buffer or by timer. // In case it was timer, flush_buffer_ can be empty. - while (flush_buffer_.length() == 0 && !flush_thread_exit_) { + while (flush_buffer_.length() == 0 && !flush_thread_exit_ && !reopen_file_) { // CondVar::wait() does not throw, so it's safe to pass the mutex rather than the guard. flush_event_.wait(write_lock_); } @@ -133,7 +133,6 @@ void AccessLogFileImpl::flushThreadFunc() { } flush_lock = std::unique_lock(flush_lock_); - ASSERT(flush_buffer_.length() > 0); about_to_write_buffer_.move(flush_buffer_); ASSERT(flush_buffer_.length() == 0); } diff --git a/source/common/api/api_impl.cc b/source/common/api/api_impl.cc index 52666328121e..e73c22bffd02 100644 --- a/source/common/api/api_impl.cc +++ b/source/common/api/api_impl.cc @@ -10,9 +10,10 @@ namespace Envoy { namespace Api { Impl::Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, - Event::TimeSystem& time_system, Filesystem::Instance& file_system) + Event::TimeSystem& time_system, Filesystem::Instance& file_system, + const OptProcessContextRef& process_context) : thread_factory_(thread_factory), store_(store), time_system_(time_system), - file_system_(file_system) {} + file_system_(file_system), process_context_(process_context) {} Event::DispatcherPtr Impl::allocateDispatcher() { return std::make_unique(*this, time_system_); diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index 92c57563e893..6112843d97ba 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -17,7 +17,8 @@ namespace Api { class Impl : public Api { public: Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, Event::TimeSystem& time_system, - Filesystem::Instance& file_system); + Filesystem::Instance& file_system, + const OptProcessContextRef& process_context = 
absl::nullopt); // Api::Api Event::DispatcherPtr allocateDispatcher() override; @@ -26,12 +27,14 @@ class Impl : public Api { Filesystem::Instance& fileSystem() override { return file_system_; } TimeSource& timeSource() override { return time_system_; } const Stats::Scope& rootScope() override { return store_; } + OptProcessContextRef processContext() override { return process_context_; } private: Thread::ThreadFactory& thread_factory_; Stats::Store& store_; Event::TimeSystem& time_system_; Filesystem::Instance& file_system_; + OptProcessContextRef process_context_; }; } // namespace Api diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 17391f1aa4b9..06146171b2c4 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -535,6 +535,56 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { } } +bool OwnedImpl::startsWith(absl::string_view data) const { + if (length() < data.length()) { + // Buffer is too short to contain data. + return false; + } + + if (data.length() == 0) { + return true; + } + + if (old_impl_) { + evbuffer_ptr start_ptr, end_ptr; + if (-1 == evbuffer_ptr_set(buffer_.get(), &start_ptr, 0, EVBUFFER_PTR_SET)) { + return false; + } + + if (-1 == evbuffer_ptr_set(buffer_.get(), &end_ptr, data.length(), EVBUFFER_PTR_SET)) { + return false; + } + + evbuffer_ptr result_ptr = + evbuffer_search_range(buffer_.get(), data.data(), data.length(), &start_ptr, &end_ptr); + return result_ptr.pos == 0; + } else { + const uint8_t* prefix = reinterpret_cast(data.data()); + size_t size = data.length(); + for (const auto& slice : slices_) { + uint64_t slice_size = slice->dataSize(); + const uint8_t* slice_start = slice->data(); + + if (slice_size >= size) { + // The remaining size bytes of data are in this slice. + return memcmp(prefix, slice_start, size) == 0; + } + + // Slice is smaller than data, see if the prefix matches. 
+ if (memcmp(prefix, slice_start, slice_size) != 0) { + return false; + } + + // Prefix matched. Continue looking at the next slice. + prefix += slice_size; + size -= slice_size; + } + + // Less data in slices than length() reported. + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + Api::IoCallUint64Result OwnedImpl::write(Network::IoHandle& io_handle) { constexpr uint64_t MaxSlices = 16; RawSlice slices[MaxSlices]; diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 68b361c579d5..114d14dabc94 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -503,6 +503,7 @@ class OwnedImpl : public LibEventInstance { Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; ssize_t search(const void* data, uint64_t size, size_t start) const override; + bool startsWith(absl::string_view data) const override; Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; std::string toString() const override; diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 22a0255a0e9f..58f06b447f0c 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -29,6 +29,7 @@ namespace Logger { FUNCTION(client) \ FUNCTION(config) \ FUNCTION(connection) \ + FUNCTION(conn_handler) \ FUNCTION(dubbo) \ FUNCTION(file) \ FUNCTION(filter) \ diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index bec633841aff..5c5a795b7af0 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -151,11 +151,10 @@ void DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAc } void DeltaSubscriptionState::handleEstablishmentFailure() { - disableInitFetchTimeoutTimer(); + // New gRPC stream will be established and send requests again. 
+ // If init_fetch_timeout is non-zero, server will continue startup after it timeout stats_.update_failure_.inc(); stats_.update_attempt_.inc(); - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, - nullptr); } envoy::api::v2::DeltaDiscoveryRequest DeltaSubscriptionState::getNextRequest() { diff --git a/source/common/config/grpc_mux_subscription_impl.cc b/source/common/config/grpc_mux_subscription_impl.cc index a181b4efa174..dffab9f0caea 100644 --- a/source/common/config/grpc_mux_subscription_impl.cc +++ b/source/common/config/grpc_mux_subscription_impl.cc @@ -81,7 +81,14 @@ void GrpcMuxSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason rea ENVOY_LOG(warn, "gRPC config for {} rejected: {}", type_url_, e->what()); break; } + stats_.update_attempt_.inc(); + if (reason == Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure) { + // New gRPC stream will be established and send requests again. + // If init_fetch_timeout is non-zero, server will continue startup after it timeout + return; + } + callbacks_.onConfigUpdateFailed(reason, e); } diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index 908250c79495..dda038436500 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -38,10 +38,7 @@ HttpSubscriptionImpl::HttpSubscriptionImpl( void HttpSubscriptionImpl::start(const std::set& resource_names) { if (init_fetch_timeout_.count() > 0) { init_fetch_timeout_timer_ = dispatcher_.createTimer([this]() -> void { - ENVOY_LOG(warn, "REST config: initial fetch timed out for", path_); - stats_.init_fetch_timeout_.inc(); - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, - nullptr); + handleFailure(Config::ConfigUpdateFailureReason::FetchTimedout, nullptr); }); init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); } @@ -77,8 +74,7 @@ void 
HttpSubscriptionImpl::parseResponse(const Http::Message& response) { try { MessageUtil::loadFromJson(response.bodyAsString(), message, validation_visitor_); } catch (const EnvoyException& e) { - ENVOY_LOG(warn, "REST config JSON conversion error: {}", e.what()); - handleFailure(nullptr); + handleFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e); return; } try { @@ -87,23 +83,45 @@ void HttpSubscriptionImpl::parseResponse(const Http::Message& response) { stats_.version_.set(HashUtil::xxHash64(request_.version_info())); stats_.update_success_.inc(); } catch (const EnvoyException& e) { - ENVOY_LOG(warn, "REST config update rejected: {}", e.what()); - stats_.update_rejected_.inc(); - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); + handleFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e); } } void HttpSubscriptionImpl::onFetchComplete() {} -void HttpSubscriptionImpl::onFetchFailure(const EnvoyException* e) { - disableInitFetchTimeoutTimer(); - ENVOY_LOG(warn, "REST config update failed: {}", e != nullptr ? 
e->what() : "fetch failure"); - handleFailure(e); +void HttpSubscriptionImpl::onFetchFailure(Config::ConfigUpdateFailureReason reason, + const EnvoyException* e) { + handleFailure(reason, e); } -void HttpSubscriptionImpl::handleFailure(const EnvoyException* e) { - stats_.update_failure_.inc(); - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, e); +void HttpSubscriptionImpl::handleFailure(Config::ConfigUpdateFailureReason reason, + const EnvoyException* e) { + + switch (reason) { + case Config::ConfigUpdateFailureReason::ConnectionFailure: + ENVOY_LOG(warn, "REST update for {} failed", path_); + stats_.update_failure_.inc(); + break; + case Config::ConfigUpdateFailureReason::FetchTimedout: + ENVOY_LOG(warn, "REST config: initial fetch timeout for {}", path_); + stats_.init_fetch_timeout_.inc(); + disableInitFetchTimeoutTimer(); + break; + case Config::ConfigUpdateFailureReason::UpdateRejected: + ASSERT(e != nullptr); + ENVOY_LOG(warn, "REST config for {} rejected: {}", path_, e->what()); + stats_.update_rejected_.inc(); + disableInitFetchTimeoutTimer(); + break; + } + + if (reason == Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure) { + // New requests will be sent again. 
+ // If init_fetch_timeout is non-zero, server will continue startup after it timeout + return; + } + + callbacks_.onConfigUpdateFailed(reason, e); } void HttpSubscriptionImpl::disableInitFetchTimeoutTimer() { diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index 452ba132582a..6ad8055e4e8b 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -37,10 +37,10 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, void createRequest(Http::Message& request) override; void parseResponse(const Http::Message& response) override; void onFetchComplete() override; - void onFetchFailure(const EnvoyException* e) override; + void onFetchFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; private: - void handleFailure(const EnvoyException* e); + void handleFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e); void disableInitFetchTimeoutTimer(); std::string path_; diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 76c0dc627cc2..612bcfcf31ec 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -123,20 +123,3 @@ envoy_cc_library( "//source/common/common:scope_tracker", ], ) - -envoy_cc_library( - name = "dispatched_thread_lib", - srcs = ["dispatched_thread.cc"], - hdrs = ["dispatched_thread.h"], - external_deps = [ - "event", - ], - deps = [ - ":dispatcher_lib", - "//include/envoy/api:api_interface", - "//include/envoy/event:dispatcher_interface", - "//source/common/common:minimal_logger_lib", - "//source/common/common:thread_lib", - "//source/server:guarddog_lib", - ], -) diff --git a/source/common/event/dispatched_thread.cc b/source/common/event/dispatched_thread.cc deleted file mode 100644 index c215ff56cbca..000000000000 --- a/source/common/event/dispatched_thread.cc +++ /dev/null @@ -1,41 +0,0 @@ -#include "common/event/dispatched_thread.h" - -#include 
-#include - -#include "envoy/common/time.h" -#include "envoy/event/dispatcher.h" -#include "envoy/server/configuration.h" -#include "envoy/thread/thread.h" - -#include "spdlog/spdlog.h" - -namespace Envoy { -namespace Event { - -void DispatchedThreadImpl::start(Server::GuardDog& guard_dog) { - thread_ = - api_.threadFactory().createThread([this, &guard_dog]() -> void { threadRoutine(guard_dog); }); -} - -void DispatchedThreadImpl::exit() { - if (thread_) { - dispatcher_->exit(); - thread_->join(); - } -} - -void DispatchedThreadImpl::threadRoutine(Server::GuardDog& guard_dog) { - ENVOY_LOG(debug, "dispatched thread entering dispatch loop"); - auto watchdog = guard_dog.createWatchDog(api_.threadFactory().currentThreadId()); - watchdog->startWatchdog(*dispatcher_); - dispatcher_->run(Dispatcher::RunType::Block); - ENVOY_LOG(debug, "dispatched thread exited dispatch loop"); - guard_dog.stopWatching(watchdog); - - watchdog.reset(); - dispatcher_.reset(); -} - -} // namespace Event -} // namespace Envoy diff --git a/source/common/event/dispatched_thread.h b/source/common/event/dispatched_thread.h deleted file mode 100644 index 9dc5db53d80d..000000000000 --- a/source/common/event/dispatched_thread.h +++ /dev/null @@ -1,67 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/api/api.h" -#include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/server/configuration.h" -#include "envoy/server/guarddog.h" - -#include "common/common/thread.h" -#include "common/event/dispatcher_impl.h" - -namespace Envoy { -namespace Event { - -/** - * Generic dispatched thread. - * - * This provides basic functionality for a thread which has the "Dispatched - * Nature" (runs an event loop) but does not want to do listening and accept new - * connections like regular Worker threads, or any of the other functionality - * specific to "Worker" threads. 
This is particularly useful if you need a - * special purpose thread that will issue or receive gRPC calls. - * - * These features are set up: - * 1) Dispatcher support: - * open connections, open files, callback posting, timers, listen - * 2) GuardDog deadlock monitoring - * - * These features are not: - * 1) Thread local storage (we don't want runOnAllThreads callbacks to run on - * this thread). - * 2) ConnectionHandler and listeners - * - * TODO(dnoe): Worker should probably be refactored to leverage this. - */ -class DispatchedThreadImpl : Logger::Loggable { -public: - DispatchedThreadImpl(Api::Api& api) : api_(api), dispatcher_(api_.allocateDispatcher()) {} - - /** - * Start the thread. - * - * @param guard_dog GuardDog instance to register with. - */ - void start(Envoy::Server::GuardDog& guard_dog); - - Dispatcher& dispatcher() { return *dispatcher_; } - - /** - * Exit the dispatched thread. Will block until the thread joins. - */ - void exit(); - -private: - void threadRoutine(Envoy::Server::GuardDog& guard_dog); - - Api::Api& api_; - DispatcherPtr dispatcher_; - Thread::ThreadPtr thread_; -}; - -} // namespace Event -} // namespace Envoy diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index 603474289767..f4d6d906e70c 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -86,6 +86,7 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/common:assert_lib", + "//source/common/common:base64_lib", "//source/common/common:empty_string", "//source/common/common:enum_to_int", "//source/common/common:hash_lib", @@ -142,6 +143,7 @@ envoy_cc_library( ], deps = [ ":context_lib", + ":google_grpc_context_lib", ":google_grpc_creds_lib", ":google_grpc_utils_lib", ":typed_async_client_lib", @@ -156,6 +158,19 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "google_grpc_context_lib", + srcs = ["google_grpc_context.cc"], + hdrs = ["google_grpc_context.h"], + 
external_deps = ["grpc"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:lock_guard_lib", + "//source/common/common:macros", + "//source/common/common:thread_lib", + ], +) + envoy_cc_library( name = "google_grpc_creds_lib", srcs = ["google_grpc_creds_impl.cc"], diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 2683044b715c..4299fddd6f3e 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -10,6 +10,7 @@ #include "common/buffer/buffer_impl.h" #include "common/buffer/zero_copy_input_stream_impl.h" #include "common/common/assert.h" +#include "common/common/base64.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/fmt.h" @@ -54,7 +55,7 @@ absl::optional Common::getGrpcStatus(const Http::HeaderMap& uint64_t grpc_status_code; if (!grpc_status_header || grpc_status_header->value().empty()) { - return {}; + return absl::nullopt; } if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) || grpc_status_code > Status::GrpcStatus::MaximumValid) { @@ -68,6 +69,27 @@ std::string Common::getGrpcMessage(const Http::HeaderMap& trailers) { return entry ? std::string(entry->value().getStringView()) : EMPTY_STRING; } +absl::optional +Common::getGrpcStatusDetailsBin(const Http::HeaderMap& trailers) { + const Http::HeaderEntry* details_header = trailers.get(Http::Headers::get().GrpcStatusDetailsBin); + if (!details_header) { + return absl::nullopt; + } + + // Some implementations use non-padded base64 encoding for grpc-status-details-bin. 
+ auto decoded_value = Base64::decodeWithoutPadding(details_header->value().getStringView()); + if (decoded_value.empty()) { + return absl::nullopt; + } + + google::rpc::Status status; + if (!status.ParseFromString(decoded_value)) { + return absl::nullopt; + } + + return {std::move(status)}; +} + Buffer::InstancePtr Common::serializeToGrpcFrame(const Protobuf::Message& message) { // http://www.grpc.io/docs/guides/wire.html // Reserve enough space for the entire message and the 5 byte header. diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index fa5fe72f7bc7..32f4fd02ee36 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -57,6 +57,15 @@ class Common { */ static std::string getGrpcMessage(const Http::HeaderMap& trailers); + /** + * Returns the decoded google.rpc.Status message from a given set of trailers, if present. + * @param trailers the trailers to parse. + * @return std::unique_ptr the gRPC status message or empty pointer if no + * grpc-status-details-bin trailer found or it was invalid. + */ + static absl::optional + getGrpcStatusDetailsBin(const Http::HeaderMap& trailers); + /** * Parse gRPC header 'grpc-timeout' value to a duration in milliseconds. * @param request_headers the header map from which to extract the value of 'grpc-timeout' header. 
diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index da834f1b82a9..688c846cb2df 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -12,6 +12,7 @@ #include "common/common/linked_object.h" #include "common/common/thread.h" #include "common/common/thread_annotations.h" +#include "common/grpc/google_grpc_context.h" #include "common/grpc/typed_async_client.h" #include "common/tracing/http_tracer_impl.h" @@ -84,6 +85,9 @@ class GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject, private: void completionThread(); + // Instantiate this first to ensure grpc_init() is called. + GoogleGrpcContext google_grpc_context_; + // The CompletionQueue for in-flight operations. This must precede completion_thread_ to ensure it // is constructed before the thread runs. grpc::CompletionQueue cq_; diff --git a/source/common/grpc/google_grpc_context.cc b/source/common/grpc/google_grpc_context.cc new file mode 100644 index 000000000000..0a337ec590c9 --- /dev/null +++ b/source/common/grpc/google_grpc_context.cc @@ -0,0 +1,44 @@ +#include "common/grpc/google_grpc_context.h" + +#include + +#include "common/common/assert.h" +#include "common/common/lock_guard.h" +#include "common/common/macros.h" +#include "common/common/thread.h" + +#include "grpcpp/grpcpp.h" + +namespace Envoy { +namespace Grpc { + +GoogleGrpcContext::GoogleGrpcContext() : instance_tracker_(instanceTracker()) { + Thread::LockGuard lock(instance_tracker_.mutex_); + if (++instance_tracker_.live_instances_ == 1) { +#ifdef ENVOY_GOOGLE_GRPC + grpc_init(); +#endif + } +} + +GoogleGrpcContext::~GoogleGrpcContext() { + // Per https://github.com/grpc/grpc/issues/20303 it is OK to call + // grpc_shutdown_blocking() as long as no one can concurrently call + // grpc_init(). 
We use check_format.py to ensure that this file contains the + // only callers to grpc_init(), and the mutex to then make that guarantee + // across users of this class. + Thread::LockGuard lock(instance_tracker_.mutex_); + ASSERT(instance_tracker_.live_instances_ > 0); + if (--instance_tracker_.live_instances_ == 0) { +#ifdef ENVOY_GOOGLE_GRPC + grpc_shutdown_blocking(); // Waiting for quiescence avoids non-determinism in tests. +#endif + } +} + +GoogleGrpcContext::InstanceTracker& GoogleGrpcContext::instanceTracker() { + MUTABLE_CONSTRUCT_ON_FIRST_USE(InstanceTracker); +} + +} // namespace Grpc +} // namespace Envoy diff --git a/source/common/grpc/google_grpc_context.h b/source/common/grpc/google_grpc_context.h new file mode 100644 index 000000000000..b83f29a12214 --- /dev/null +++ b/source/common/grpc/google_grpc_context.h @@ -0,0 +1,33 @@ +#pragma once + +#include "common/common/thread.h" + +namespace Envoy { +namespace Grpc { + +// Captures global grpc initialization and shutdown. Note that grpc +// initialization starts several threads, so it is a little annoying to run them +// alongside unrelated tests, particularly if they are trying to track memory +// usage, or you are exploiting otherwise consistent run-to-run pointer values +// during debug. +// +// Instantiating this class makes it easy to ensure classes that depend on grpc +// libraries get them initialized. 
+class GoogleGrpcContext { +public: + GoogleGrpcContext(); + ~GoogleGrpcContext(); + +private: + struct InstanceTracker { + Thread::MutexBasicLockable mutex_; + uint64_t live_instances_ GUARDED_BY(mutex_) = 0; + }; + + static InstanceTracker& instanceTracker(); + + InstanceTracker& instance_tracker_; +}; + +} // namespace Grpc +} // namespace Envoy diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 3195e1791728..ea0349a473be 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -31,7 +31,7 @@ std::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& // See if the data we have so far shows the HTTP/2 prefix. We ignore the case where someone sends // us the first few bytes of the HTTP/2 prefix since in all public cases we use SSL/ALPN. For // internal cases this should practically never happen. - if (-1 != data.search(Http2::CLIENT_MAGIC_PREFIX.c_str(), Http2::CLIENT_MAGIC_PREFIX.size(), 0)) { + if (data.startsWith(Http2::CLIENT_MAGIC_PREFIX)) { return Http2::ALPN_STRING; } diff --git a/source/common/http/headers.h b/source/common/http/headers.h index f4d830dab193..1bdeb0909014 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -125,6 +125,7 @@ class HeaderValues { const LowerCaseString GrpcStatus{"grpc-status"}; const LowerCaseString GrpcTimeout{"grpc-timeout"}; const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; + const LowerCaseString GrpcStatusDetailsBin{"grpc-status-details-bin"}; const LowerCaseString Host{":authority"}; const LowerCaseString HostLegacy{"host"}; const LowerCaseString Http2Settings{"http2-settings"}; diff --git a/source/common/http/rest_api_fetcher.cc b/source/common/http/rest_api_fetcher.cc index 9702f7081ad1..762623c0e980 100644 --- a/source/common/http/rest_api_fetcher.cc +++ b/source/common/http/rest_api_fetcher.cc @@ -41,14 +41,16 @@ void 
RestApiFetcher::onSuccess(Http::MessagePtr&& response) { try { parseResponse(*response); } catch (EnvoyException& e) { - onFetchFailure(&e); + onFetchFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e); } requestComplete(); } -void RestApiFetcher::onFailure(Http::AsyncClient::FailureReason) { - onFetchFailure(nullptr); +void RestApiFetcher::onFailure(Http::AsyncClient::FailureReason reason) { + // Currently Http::AsyncClient::FailureReason only has one value: "Reset". + ASSERT(reason == Http::AsyncClient::FailureReason::Reset); + onFetchFailure(Config::ConfigUpdateFailureReason::ConnectionFailure, nullptr); requestComplete(); } diff --git a/source/common/http/rest_api_fetcher.h b/source/common/http/rest_api_fetcher.h index 48b5636079b7..80ab2d3185fe 100644 --- a/source/common/http/rest_api_fetcher.h +++ b/source/common/http/rest_api_fetcher.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/runtime/runtime.h" #include "envoy/upstream/cluster_manager.h" @@ -46,9 +47,11 @@ class RestApiFetcher : public Http::AsyncClient::Callbacks { /** * This will be called if the fetch fails (either due to non-200 response, network error, etc.). + * @param reason supplies the fetch failure reason. * @param e supplies any exception data on why the fetch failed. May be nullptr. 
*/ - virtual void onFetchFailure(const EnvoyException* e) PURE; + virtual void onFetchFailure(Config::ConfigUpdateFailureReason reason, + const EnvoyException* e) PURE; protected: const std::string remote_cluster_name_; diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 11bedbeba907..65a708903745 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -273,7 +273,7 @@ Utility::parseHttp2Settings(const envoy::api::v2::core::Http2ProtocolOptions& co Http1Settings Utility::parseHttp1Settings(const envoy::api::v2::core::Http1ProtocolOptions& config) { Http1Settings ret; - ret.allow_absolute_url_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, allow_absolute_url, false); + ret.allow_absolute_url_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, allow_absolute_url, true); ret.accept_http_10_ = config.accept_http_10(); ret.default_host_for_http_10_ = config.default_host_for_http_10(); return ret; diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index c991ad1bb0a8..e50f4528f004 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -50,7 +50,8 @@ bool ipFamilySupported(int domain) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); const Api::SysCallIntResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); if (result.rc_ >= 0) { - RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, ""); + RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, + absl::StrCat("Fail to close fd: response code ", result.rc_)); } return result.rc_ != -1; } diff --git a/source/common/protobuf/protobuf.h b/source/common/protobuf/protobuf.h index ff6da52694f3..620f3b1c0b4d 100644 --- a/source/common/protobuf/protobuf.h +++ b/source/common/protobuf/protobuf.h @@ -7,6 +7,7 @@ #include "google/protobuf/any.pb.h" #include "google/protobuf/descriptor.h" #include "google/protobuf/descriptor.pb.h" +#include "google/protobuf/descriptor_database.h" 
#include "google/protobuf/empty.pb.h" #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index f2c94278313f..9539e8d3b827 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -88,6 +88,20 @@ ProtoValidationException::ProtoValidationException(const std::string& validation ENVOY_LOG_MISC(debug, "Proto validation error; throwing {}", what()); } +size_t MessageUtil::hash(const Protobuf::Message& message) { + std::string text_format; + + { + Protobuf::TextFormat::Printer printer; + printer.SetExpandAny(true); + printer.SetUseFieldNumber(true); + printer.SetSingleLineMode(true); + printer.PrintToString(message, &text_format); + } + + return HashUtil::xxHash64(text_format); +} + void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor) { Protobuf::util::JsonParseOptions options; @@ -165,6 +179,51 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa } } +void checkForDeprecatedNonRepeatedEnumValue(const Protobuf::Message& message, + absl::string_view filename, + const Protobuf::FieldDescriptor* field, + const Protobuf::Reflection* reflection, + Runtime::Loader* runtime) { + // Repeated fields will be handled by recursion in checkForUnexpectedFields. + if (field->is_repeated() || field->cpp_type() != Protobuf::FieldDescriptor::CPPTYPE_ENUM) { + return; + } + + bool default_value = !reflection->HasField(message, field); + + const Protobuf::EnumValueDescriptor* enum_value_descriptor = reflection->GetEnum(message, field); + if (!enum_value_descriptor->options().deprecated()) { + return; + } + std::string err = fmt::format( + "Using {}deprecated value {} for enum '{}' from file {}. This enum value will be removed " + "from Envoy soon{}. 
Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " + "for details.", + (default_value ? "the default now-" : ""), enum_value_descriptor->name(), field->full_name(), + filename, (default_value ? " so a non-default value must now be explicitly set" : "")); +#ifdef ENVOY_DISABLE_DEPRECATED_FEATURES + bool warn_only = false; +#else + bool warn_only = true; +#endif + + if (runtime && !runtime->snapshot().deprecatedFeatureEnabled(absl::StrCat( + "envoy.deprecated_features.", filename, ":", enum_value_descriptor->name()))) { + warn_only = false; + } + + if (warn_only) { + ENVOY_LOG_MISC(warn, "{}", err); + } else { + const char fatal_error[] = + " If continued use of this field is absolutely necessary, see " + "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" + "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " + "highly discouraged override."; + throw ProtoValidationException(err + fatal_error, message); + } +} + void MessageUtil::checkForUnexpectedFields(const Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor, Runtime::Loader* runtime) { @@ -185,7 +244,12 @@ void MessageUtil::checkForUnexpectedFields(const Protobuf::Message& message, const Protobuf::Descriptor* descriptor = message.GetDescriptor(); const Protobuf::Reflection* reflection = message.GetReflection(); for (int i = 0; i < descriptor->field_count(); ++i) { - const auto* field = descriptor->field(i); + const Protobuf::FieldDescriptor* field = descriptor->field(i); + absl::string_view filename = filenameFromPath(field->file()->name()); + + // Before we check to see if the field is in use, see if there's a + // deprecated default enum value. + checkForDeprecatedNonRepeatedEnumValue(message, filename, field, reflection, runtime); // If this field is not in use, continue. 
if ((field->is_repeated() && reflection->FieldSize(message, field) == 0) || @@ -198,7 +262,6 @@ void MessageUtil::checkForUnexpectedFields(const Protobuf::Message& message, #else bool warn_only = true; #endif - absl::string_view filename = filenameFromPath(field->file()->name()); // Allow runtime to be null both to not crash if this is called before server initialization, // and so proto validation works in context where runtime singleton is not set up (e.g. // standalone config validation utilities) diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 1f29ea1d7921..8a56d5185a11 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -200,20 +200,14 @@ class MessageUtil { using FileExtensions = ConstSingleton; - static std::size_t hash(const Protobuf::Message& message) { - // Use Protobuf::io::CodedOutputStream to force deterministic serialization, so that the same - // message doesn't hash to different values. - std::string text; - { - // For memory safety, the StringOutputStream needs to be destroyed before - // we read the string. - Protobuf::io::StringOutputStream string_stream(&text); - Protobuf::io::CodedOutputStream coded_stream(&string_stream); - coded_stream.SetSerializationDeterministic(true); - message.SerializeToCodedStream(&coded_stream); - } - return HashUtil::xxHash64(text); - } + /** + * A hash function uses Protobuf::TextFormat to force deterministic serialization recursively + * including known types in google.protobuf.Any. See + * https://github.com/protocolbuffers/protobuf/issues/5731 for the context. + * Using this function is discouraged, see discussion in + * https://github.com/envoyproxy/envoy/issues/8301. 
+ */ + static std::size_t hash(const Protobuf::Message& message); static void loadFromJson(const std::string& json, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor); diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index af180e85738d..c9fd1b300255 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -148,8 +148,9 @@ void RdsRouteConfigSubscription::onConfigUpdate( } } -void RdsRouteConfigSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, - const EnvoyException*) { +void RdsRouteConfigSubscription::onConfigUpdateFailed( + Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) { + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); // We need to allow server startup to continue, even if we have a bad // config. init_target_.ready(); diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index 878cca0a3280..422551449acd 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -148,8 +148,9 @@ class ScopedRdsConfigSubscription : public Envoy::Config::DeltaConfigSubscriptio void onConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) override; - void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, + void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) override { + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); DeltaConfigSubscriptionInstance::onConfigUpdateFailed(); } std::string resourceName(const ProtobufWkt::Any& resource) override { diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc index 6cd05ed015f0..93d304647f7d 100644 --- a/source/common/router/vhds.cc +++ b/source/common/router/vhds.cc @@ -45,8 +45,9 @@ 
VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, *scope_, *this); } -void VhdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, +void VhdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) { + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); // We need to allow server startup to continue, even if we have a bad // config. init_target_.ready(); diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index b1f4494b27a7..3d0a721c6aa1 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -19,6 +19,7 @@ #include "common/runtime/runtime_features.h" #include "absl/strings/match.h" +#include "absl/strings/numbers.h" #include "openssl/rand.h" namespace Envoy { @@ -276,6 +277,15 @@ uint64_t SnapshotImpl::getInteger(const std::string& key, uint64_t default_value } } +double SnapshotImpl::getDouble(const std::string& key, double default_value) const { + auto entry = values_.find(key); + if (entry == values_.end() || !entry->second.double_value_) { + return default_value; + } else { + return entry->second.double_value_.value(); + } +} + bool SnapshotImpl::getBoolean(absl::string_view key, bool& value) const { auto entry = values_.find(key); if (entry != values_.end() && entry->second.bool_value_.has_value()) { @@ -332,10 +342,10 @@ bool SnapshotImpl::parseEntryBooleanValue(Entry& entry) { return false; } -bool SnapshotImpl::parseEntryUintValue(Entry& entry) { - uint64_t converted_uint64; - if (absl::SimpleAtoi(entry.raw_string_value_, &converted_uint64)) { - entry.uint_value_ = converted_uint64; +bool SnapshotImpl::parseEntryDoubleValue(Entry& entry) { + double converted_double; + if (absl::SimpleAtod(entry.raw_string_value_, &converted_double)) { + entry.double_value_ = converted_double; return true; } return false; @@ -543,8 +553,9 @@ void 
RtdsSubscription::onConfigUpdate( onConfigUpdate(unwrapped_resource, resources[0].version()); } -void RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, +void RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) { + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); // We need to allow server startup to continue, even if we have a bad // config. init_target_.ready(); diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 9b3eb6a50ce1..f5276961886d 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -92,6 +92,7 @@ class SnapshotImpl : public Snapshot, uint64_t random_value) const override; const std::string& get(const std::string& key) const override; uint64_t getInteger(const std::string& key, uint64_t default_value) const override; + double getDouble(const std::string& key, double default_value) const override; const std::vector& getLayers() const override; static Entry createEntry(const std::string& value); @@ -106,14 +107,21 @@ class SnapshotImpl : public Snapshot, if (parseEntryBooleanValue(entry)) { return; } - if (parseEntryUintValue(entry)) { + + if (parseEntryDoubleValue(entry) && entry.double_value_ >= 0 && + entry.double_value_ <= std::numeric_limits::max()) { + // Valid uint values will always be parseable as doubles, so we assign the value to both the + // uint and double fields. In cases where the value is something like "3.1", we will floor the + // number by casting it to a uint and assigning the uint value. 
+ entry.uint_value_ = entry.double_value_; return; } + parseEntryFractionalPercentValue(entry); } static bool parseEntryBooleanValue(Entry& entry); - static bool parseEntryUintValue(Entry& entry); + static bool parseEntryDoubleValue(Entry& entry); static void parseEntryFractionalPercentValue(Entry& entry); const std::vector layers_; diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 7d695040dfa9..4514f5ce0792 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -58,7 +58,9 @@ void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrFieldsecond; } + return fallback; +} - // Other tokens require holding a lock for our local cache. - absl::MutexLock lock(&mutex_); - Stats::StatName& stat_name = dynamic_stat_names_[token]; - if (stat_name.empty()) { // Note that builtin_stat_names_ already has one for "". - stat_name = pool_.add(token); +StatName StatNameSet::getDynamic(absl::string_view token) { + Stats::StatName stat_name = getBuiltin(token, StatName()); + if (stat_name.empty()) { + // Other tokens require holding a lock for our local cache. + absl::MutexLock lock(&mutex_); + Stats::StatName& stat_name_ref = dynamic_stat_names_[token]; + if (stat_name_ref.empty()) { // Note that builtin_stat_names_ already has one for "". + stat_name_ref = pool_.add(token); + } + stat_name = stat_name_ref; } return stat_name; } diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index b83da159c035..36cc04166765 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -658,6 +658,18 @@ class StatNameSet { */ void rememberBuiltin(absl::string_view str); + /** + * Remembers every string in a container as builtins. 
+ */ + template void rememberBuiltins(const StringContainer& container) { + for (const auto& str : container) { + rememberBuiltin(str); + } + } + void rememberBuiltins(const std::vector& container) { + rememberBuiltins>(container); + } + /** * Finds a StatName by name. If 'token' has been remembered as a built-in, * then no lock is required. Otherwise we must consult dynamic_stat_names_ @@ -671,15 +683,19 @@ class StatNameSet { * set's mutex and also the SymbolTable mutex which must be taken during * StatNamePool::add(). */ - StatName getStatName(absl::string_view token); + StatName getDynamic(absl::string_view token); + StatName getBuiltin(absl::string_view token, StatName fallback); /** * Adds a StatName using the pool, but without remembering it in any maps. */ - StatName add(absl::string_view str) { return pool_.add(str); } + StatName add(absl::string_view str) { + absl::MutexLock lock(&mutex_); + return pool_.add(str); + } private: - Stats::StatNamePool pool_; + Stats::StatNamePool pool_ GUARDED_BY(mutex_); absl::Mutex mutex_; using StringStatNameMap = absl::flat_hash_map; StringStatNameMap builtin_stat_names_; diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index 5d9f584b517e..8bfb093befae 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -36,7 +36,7 @@ SlotPtr InstanceImpl::allocateSlot() { ASSERT(idx < slots_.size()); std::unique_ptr slot(new SlotImpl(*this, idx)); slots_[idx] = slot.get(); - return slot; + return std::make_unique(*this, std::move(slot)); } bool InstanceImpl::SlotImpl::currentThreadRegistered() { diff --git a/source/common/thread_local/thread_local_impl.h b/source/common/thread_local/thread_local_impl.h index 49f1889e44d7..b451c4eb236a 100644 --- a/source/common/thread_local/thread_local_impl.h +++ b/source/common/thread_local/thread_local_impl.h @@ -99,6 +99,9 @@ class InstanceImpl : Logger::Loggable, public 
NonCopyable, pub std::thread::id main_thread_id_; Event::Dispatcher* main_thread_dispatcher_{}; std::atomic shutdown_{}; + + // Test only. + friend class ThreadLocalInstanceImplTest; }; } // namespace ThreadLocal diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 41f7ec4faca6..f6d93f3da6b4 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -77,6 +77,8 @@ void CdsApiImpl::onConfigUpdate( if (cm_.addOrUpdateCluster(cluster, resource.version())) { any_applied = true; ENVOY_LOG(debug, "cds: add/update cluster '{}'", cluster.name()); + } else { + ENVOY_LOG(debug, "cds: add/update cluster '{}' skipped", cluster.name()); } } catch (const EnvoyException& e) { exception_msgs.push_back(fmt::format("{}: {}", cluster.name(), e.what())); @@ -99,8 +101,9 @@ void CdsApiImpl::onConfigUpdate( } } -void CdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, +void CdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) { + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); // We need to allow server startup to continue, even if we have a bad // config. runInitializeCallbackIfAny(); diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 3d795c9a207b..b1064a4501d3 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -253,11 +253,7 @@ bool EdsClusterImpl::updateHostsPerLocality( void EdsClusterImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) { - // We should not call onPreInitComplete if this method is called because of stream disconnection. - // This might potentially hang the initialization forever, if init_fetch_timeout is disabled. 
- if (reason == Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure) { - return; - } + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); // We need to allow server startup to continue, even if we have a bad config. onPreInitComplete(); } diff --git a/source/exe/BUILD b/source/exe/BUILD index 7ed716a317a6..719cded40cc6 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -7,7 +7,6 @@ load( "envoy_cc_platform_dep", "envoy_cc_posix_library", "envoy_cc_win32_library", - "envoy_google_grpc_external_deps", "envoy_package", ) load( @@ -72,6 +71,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:compiler_requirements_lib", "//source/common/common:perf_annotation_lib", + "//source/common/grpc:google_grpc_context_lib", "//source/common/stats:symbol_table_creator_lib", "//source/server:hot_restart_lib", "//source/server:hot_restart_nop_lib", @@ -95,7 +95,7 @@ envoy_cc_library( "//source/common/event:libevent_lib", "//source/common/http/http2:nghttp2_lib", "//source/server:proto_descriptors_lib", - ] + envoy_google_grpc_external_deps(), + ], ) envoy_cc_library( diff --git a/source/exe/main_common.h b/source/exe/main_common.h index a0a4796de18e..228f6e5e04a2 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -5,6 +5,7 @@ #include "common/common/thread.h" #include "common/event/real_time_system.h" +#include "common/grpc/google_grpc_context.h" #include "common/stats/fake_symbol_table_impl.h" #include "common/stats/thread_local_store.h" #include "common/thread_local/thread_local_impl.h" @@ -65,7 +66,8 @@ class MainCommonBase { const AdminRequestFn& handler); protected: - ProcessWide process_wide_; // Process-wide state setup/teardown. + ProcessWide process_wide_; // Process-wide state setup/teardown (excluding grpc). 
+ Grpc::GoogleGrpcContext google_grpc_context_; const Envoy::OptionsImpl& options_; Server::ComponentFactory& component_factory_; Thread::ThreadFactory& thread_factory_; diff --git a/source/exe/process_wide.cc b/source/exe/process_wide.cc index 62ae2a7515ea..219921c081bb 100644 --- a/source/exe/process_wide.cc +++ b/source/exe/process_wide.cc @@ -8,10 +8,6 @@ #include "ares.h" -#ifdef ENVOY_GOOGLE_GRPC -#include "grpc/grpc.h" -#endif - namespace Envoy { namespace { // Static variable to count initialization pairs. For tests like @@ -22,13 +18,25 @@ uint32_t process_wide_initialized; ProcessWide::ProcessWide() : initialization_depth_(process_wide_initialized) { if (process_wide_initialized++ == 0) { -#ifdef ENVOY_GOOGLE_GRPC - grpc_init(); -#endif ares_library_init(ARES_LIB_INIT_ALL); Event::Libevent::Global::initialize(); Envoy::Server::validateProtoDescriptors(); Http::Http2::initializeNghttp2Logging(); + + // We do not initialize Google gRPC here -- we instead instantiate + // Grpc::GoogleGrpcContext in MainCommon immediately after instantiating + // ProcessWide. This is because ProcessWide is instantiated in the unit-test + // flow in test/test_runner.h, and grpc_init() instantiates threads which + // allocate memory asynchronous to running tests, making it hard to + // accurately measure memory consumption, and making unit-test debugging + // non-deterministic. See https://github.com/envoyproxy/envoy/issues/8282 + // for details. Of course we also need grpc_init called in unit-tests that + // test Google gRPC, and the relevant classes must also instantiate + // Grpc::GoogleGrpcContext, which allows for nested instantiation. + // + // It appears that grpc_init() started instantiating threads in grpc 1.22.1, + // which was integrated in https://github.com/envoyproxy/envoy/pull/8196, + // around the time the flakes in #8282 started being reported. 
} } @@ -37,9 +45,6 @@ ProcessWide::~ProcessWide() { if (--process_wide_initialized == 0) { process_wide_initialized = false; ares_library_cleanup(); -#ifdef ENVOY_GOOGLE_GRPC - grpc_shutdown(); -#endif } ASSERT(process_wide_initialized == initialization_depth_); } diff --git a/source/exe/process_wide.h b/source/exe/process_wide.h index 2c305e8f1c27..bf77f6650e83 100644 --- a/source/exe/process_wide.h +++ b/source/exe/process_wide.h @@ -5,7 +5,7 @@ namespace Envoy { // Process-wide lifecycle events for global state in third-party dependencies, -// e.g. gRPC, c-ares. There should only ever be a single instance of this. +// e.g. c-ares. There should only ever be a single instance of this. class ProcessWide { public: ProcessWide(); diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index a8610a68f050..42636373a501 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -122,6 +122,11 @@ void Utility::extractCommonAccessLogProperties( *stream_info.downstreamRemoteAddress(), *common_access_log.mutable_downstream_remote_address()); } + if (stream_info.downstreamDirectRemoteAddress() != nullptr) { + Network::Utility::addressToProtobufAddress( + *stream_info.downstreamDirectRemoteAddress(), + *common_access_log.mutable_downstream_direct_remote_address()); + } if (stream_info.downstreamLocalAddress() != nullptr) { Network::Utility::addressToProtobufAddress( *stream_info.downstreamLocalAddress(), diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 8b3d30d012fd..61b2abade1cb 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -63,6 +63,7 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/http:async_client_lib", 
"//source/common/http:codes_lib", + "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/config/filter/http/ext_authz/v2:ext_authz_cc", ], ) diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index f79df739884c..3d0bc3ab9038 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -10,12 +10,26 @@ #include "envoy/service/auth/v2/external_auth.pb.h" #include "envoy/tracing/http_tracer.h" +#include "common/singleton/const_singleton.h" + namespace Envoy { namespace Extensions { namespace Filters { namespace Common { namespace ExtAuthz { +/** + * Constant values used for tracing metadata. + */ +struct TracingContantValues { + const std::string TraceStatus = "ext_authz_status"; + const std::string TraceUnauthz = "ext_authz_unauthorized"; + const std::string TraceOk = "ext_authz_ok"; + const std::string HttpStatus = "ext_authz_http_status"; +}; + +using TracingConstants = ConstSingleton; + /** * Possible async results for a check call. 
*/ diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index 02d1298c5f67..9bfb54b3498b 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -45,13 +45,13 @@ void GrpcClientImpl::onSuccess(std::unique_ptr(Response{}); if (response->status().code() == Grpc::Status::GrpcStatus::Ok) { - span.setTag(Constants::get().TraceStatus, Constants::get().TraceOk); + span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); authz_response->status = CheckStatus::OK; if (response->has_ok_response()) { toAuthzResponseHeader(authz_response, response->ok_response().headers()); } } else { - span.setTag(Constants::get().TraceStatus, Constants::get().TraceUnauthz); + span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); authz_response->status = CheckStatus::Denied; if (response->has_denied_response()) { toAuthzResponseHeader(authz_response, response->denied_response().headers()); diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index df06350ebf5d..c5e20dea7717 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -17,7 +17,6 @@ #include "envoy/upstream/cluster_manager.h" #include "common/grpc/typed_async_client.h" -#include "common/singleton/const_singleton.h" #include "extensions/filters/common/ext_authz/check_request_utils.h" #include "extensions/filters/common/ext_authz/ext_authz.h" @@ -30,14 +29,6 @@ namespace ExtAuthz { using ExtAuthzAsyncCallbacks = Grpc::AsyncRequestCallbacks; -struct ConstantValues { - const std::string TraceStatus = "ext_authz_status"; - const std::string TraceUnauthz = "ext_authz_unauthorized"; - const std::string 
TraceOk = "ext_authz_ok"; -}; - -using Constants = ConstSingleton; - /* * This client implementation is used when the Ext_Authz filter needs to communicate with an gRPC * authorization server. Unlike the HTTP client, the gRPC allows the server to define response diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index b49960bf511a..02728b63e123 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -1,6 +1,7 @@ #include "extensions/filters/common/ext_authz/ext_authz_http_impl.h" #include "common/common/enum_to_int.h" +#include "common/common/fmt.h" #include "common/http/async_client_impl.h" #include "common/http/codes.h" @@ -88,7 +89,8 @@ ClientConfig::ClientConfig(const envoy::config::filter::http::ext_authz::v2::Ext authorization_headers_to_add_( toHeadersAdd(config.http_service().authorization_request().headers_to_add())), cluster_name_(config.http_service().server_uri().cluster()), timeout_(timeout), - path_prefix_(path_prefix) {} + path_prefix_(path_prefix), + tracing_name_(fmt::format("async {} egress", config.http_service().server_uri().cluster())) {} MatcherSharedPtr ClientConfig::toRequestMatchers(const envoy::type::matcher::ListStringMatcher& list) { @@ -150,23 +152,35 @@ Http::LowerCaseStrPairVector ClientConfig::toHeadersAdd( return header_vec; } -RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config) - : cm_(cm), config_(config) {} +RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, + TimeSource& time_source) + : cm_(cm), config_(config), time_source_(time_source) {} -RawHttpClientImpl::~RawHttpClientImpl() { ASSERT(!callbacks_); } +RawHttpClientImpl::~RawHttpClientImpl() { + ASSERT(callbacks_ == nullptr); + ASSERT(span_ == nullptr); +} void RawHttpClientImpl::cancel() { 
ASSERT(callbacks_ != nullptr); + ASSERT(span_ != nullptr); + span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); + span_->finishSpan(); request_->cancel(); callbacks_ = nullptr; + span_ = nullptr; } // Client void RawHttpClientImpl::check(RequestCallbacks& callbacks, const envoy::service::auth::v2::CheckRequest& request, - Tracing::Span&) { + Tracing::Span& parent_span) { ASSERT(callbacks_ == nullptr); + ASSERT(span_ == nullptr); callbacks_ = &callbacks; + span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), config_->tracingName(), + time_source_.systemTime()); + span_->setTag(Tracing::Tags::get().UpstreamCluster, config_->cluster()); Http::HeaderMapPtr headers; const uint64_t request_length = request.attributes().request().http().body().size(); @@ -210,7 +224,10 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, // TODO(dio): Add stats and tracing related to this. ENVOY_LOG(debug, "ext_authz cluster '{}' does not exist", cluster); callbacks_->onComplete(std::make_unique(errorResponse())); + span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); + span_->finishSpan(); callbacks_ = nullptr; + span_ = nullptr; } else { request_ = cm_.httpAsyncClientForCluster(cluster).send( std::move(message), *this, @@ -220,13 +237,18 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, void RawHttpClientImpl::onSuccess(Http::MessagePtr&& message) { callbacks_->onComplete(toResponse(std::move(message))); + span_->finishSpan(); callbacks_ = nullptr; + span_ = nullptr; } void RawHttpClientImpl::onFailure(Http::AsyncClient::FailureReason reason) { ASSERT(reason == Http::AsyncClient::FailureReason::Reset); callbacks_->onComplete(std::make_unique(errorResponse())); + span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); + span_->finishSpan(); callbacks_ = nullptr; + span_ = nullptr; } ResponsePtr RawHttpClientImpl::toResponse(Http::MessagePtr message) { @@ -235,9 +257,13 @@ ResponsePtr 
RawHttpClientImpl::toResponse(Http::MessagePtr message) { uint64_t status_code{}; if (!absl::SimpleAtoi(message->headers().Status()->value().getStringView(), &status_code)) { ENVOY_LOG(warn, "ext_authz HTTP client failed to parse the HTTP status code."); + span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); return std::make_unique(errorResponse()); } + span_->setTag(TracingConstants::get().HttpStatus, + Http::CodeUtility::toString(static_cast(status_code))); + // Set an error status if the call to the authorization server returns any of the 5xx HTTP error // codes. A Forbidden response is sent to the client if the filter has not been configured with // failure_mode_allow. @@ -250,6 +276,7 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::MessagePtr message) { SuccessResponse ok{message->headers(), config_->upstreamHeaderMatchers(), Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK}}; + span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); return std::move(ok.response_); } @@ -257,6 +284,7 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::MessagePtr message) { SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(), Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, message->bodyAsString(), static_cast(status_code)}}; + span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); return std::move(denied.response_); } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 6d4c57128a84..a7c4c4eebced 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/filter/http/ext_authz/v2/ext_authz.pb.h" +#include "envoy/tracing/http_tracer.h" #include 
"envoy/upstream/cluster_manager.h" #include "common/common/logger.h" @@ -88,16 +89,21 @@ class ClientConfig { const MatcherSharedPtr& clientHeaderMatchers() const { return client_header_matchers_; } /** - * Returns a list of matchers used for selecting the authorization response headers that + * Returns a list of matchers used for selecting the authorization response headers that * should be send to an the upstream server. */ const MatcherSharedPtr& upstreamHeaderMatchers() const { return upstream_header_matchers_; } /** - * @return List of headers that will be add to the authorization request. + * Returns a list of headers that will be add to the authorization request. */ const Http::LowerCaseStrPairVector& headersToAdd() const { return authorization_headers_to_add_; } + /** + * Returns the name used for tracing. + */ + const std::string& tracingName() { return tracing_name_; } + private: static MatcherSharedPtr toRequestMatchers(const envoy::type::matcher::ListStringMatcher& matcher); static MatcherSharedPtr toClientMatchers(const envoy::type::matcher::ListStringMatcher& matcher); @@ -113,6 +119,7 @@ class ClientConfig { const std::string cluster_name_; const std::chrono::milliseconds timeout_; const std::string path_prefix_; + const std::string tracing_name_; }; using ClientConfigSharedPtr = std::shared_ptr; @@ -128,7 +135,8 @@ class RawHttpClientImpl : public Client, public Http::AsyncClient::Callbacks, Logger::Loggable { public: - explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config); + explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, + TimeSource& time_source); ~RawHttpClientImpl() override; // ExtAuthz::Client @@ -146,6 +154,8 @@ class RawHttpClientImpl : public Client, ClientConfigSharedPtr config_; Http::AsyncClient::Request* request_{}; RequestCallbacks* callbacks_{}; + TimeSource& time_source_; + Tracing::SpanPtr span_; }; } // namespace ExtAuthz diff --git 
a/source/extensions/filters/http/dynamo/dynamo_filter.cc b/source/extensions/filters/http/dynamo/dynamo_filter.cc index c9072aaf5b1a..8312d768f9fe 100644 --- a/source/extensions/filters/http/dynamo/dynamo_filter.cc +++ b/source/extensions/filters/http/dynamo/dynamo_filter.cc @@ -176,11 +176,11 @@ void DynamoFilter::chargeStatsPerEntity(const std::string& entity, const std::st time_source_.monotonicTime() - start_decode_); size_t group_index = DynamoStats::groupIndex(status); - const Stats::StatName entity_type_name = stats_->getStatName(entity_type); - const Stats::StatName entity_name = stats_->getStatName(entity); - const Stats::StatName total_name = - stats_->getStatName(absl::StrCat("upstream_rq_total_", status)); - const Stats::StatName time_name = stats_->getStatName(absl::StrCat("upstream_rq_time_", status)); + const Stats::StatName entity_type_name = + stats_->getBuiltin(entity_type, stats_->unknown_entity_type_); + const Stats::StatName entity_name = stats_->getDynamic(entity); + const Stats::StatName total_name = stats_->getDynamic(absl::StrCat("upstream_rq_total_", status)); + const Stats::StatName time_name = stats_->getDynamic(absl::StrCat("upstream_rq_time_", status)); stats_->counter({entity_type_name, entity_name, stats_->upstream_rq_total_}).inc(); const Stats::StatName total_group = stats_->upstream_rq_total_groups_[group_index]; @@ -200,7 +200,7 @@ void DynamoFilter::chargeUnProcessedKeysStats(const Json::Object& json_body) { std::vector unprocessed_tables = RequestParser::parseBatchUnProcessedKeys(json_body); for (const std::string& unprocessed_table : unprocessed_tables) { stats_ - ->counter({stats_->error_, stats_->getStatName(unprocessed_table), + ->counter({stats_->error_, stats_->getDynamic(unprocessed_table), stats_->batch_failure_unprocessed_keys_}) .inc(); } @@ -211,11 +211,11 @@ void DynamoFilter::chargeFailureSpecificStats(const Json::Object& json_body) { if (!error_type.empty()) { if (table_descriptor_.table_name.empty()) { - 
stats_->counter({stats_->error_, stats_->no_table_, stats_->getStatName(error_type)}).inc(); + stats_->counter({stats_->error_, stats_->no_table_, stats_->getDynamic(error_type)}).inc(); } else { stats_ - ->counter({stats_->error_, stats_->getStatName(table_descriptor_.table_name), - stats_->getStatName(error_type)}) + ->counter({stats_->error_, stats_->getDynamic(table_descriptor_.table_name), + stats_->getDynamic(error_type)}) .inc(); } } else { diff --git a/source/extensions/filters/http/dynamo/dynamo_stats.cc b/source/extensions/filters/http/dynamo/dynamo_stats.cc index 543fcc5f82d5..53e51133394c 100644 --- a/source/extensions/filters/http/dynamo/dynamo_stats.cc +++ b/source/extensions/filters/http/dynamo/dynamo_stats.cc @@ -28,7 +28,9 @@ DynamoStats::DynamoStats(Stats::Scope& scope, const std::string& prefix) operation_missing_(stat_name_set_.add("operation_missing")), table_(stat_name_set_.add("table")), table_missing_(stat_name_set_.add("table_missing")), upstream_rq_time_(stat_name_set_.add("upstream_rq_time")), - upstream_rq_total_(stat_name_set_.add("upstream_rq_total")) { + upstream_rq_total_(stat_name_set_.add("upstream_rq_total")), + unknown_entity_type_(stat_name_set_.add("unknown_entity_type")), + unknown_operation_(stat_name_set_.add("unknown_operation")) { upstream_rq_total_groups_[0] = stat_name_set_.add("upstream_rq_total_unknown"); upstream_rq_time_groups_[0] = stat_name_set_.add("upstream_rq_time_unknown"); for (size_t i = 1; i < DynamoStats::NumGroupEntries; ++i) { @@ -37,6 +39,11 @@ DynamoStats::DynamoStats(Stats::Scope& scope, const std::string& prefix) } RequestParser::forEachStatString( [this](const std::string& str) { stat_name_set_.rememberBuiltin(str); }); + for (uint32_t status_code : {200, 400, 403, 502}) { + stat_name_set_.rememberBuiltin(absl::StrCat("upstream_rq_time_", status_code)); + stat_name_set_.rememberBuiltin(absl::StrCat("upstream_rq_total_", status_code)); + } + stat_name_set_.rememberBuiltins({"operation", "table"}); } 
Stats::SymbolTable::StoragePtr DynamoStats::addPrefix(const Stats::StatNameVec& names) { @@ -62,9 +69,9 @@ Stats::Counter& DynamoStats::buildPartitionStatCounter(const std::string& table_ const std::string& partition_id) { // Use the last 7 characters of the partition id. absl::string_view id_last_7 = absl::string_view(partition_id).substr(partition_id.size() - 7); - const Stats::SymbolTable::StoragePtr stat_name_storage = - addPrefix({table_, getStatName(table_name), capacity_, getStatName(operation), - getStatName(absl::StrCat("__partition_id=", id_last_7))}); + const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix( + {table_, getDynamic(table_name), capacity_, getBuiltin(operation, unknown_operation_), + getDynamic(absl::StrCat("__partition_id=", id_last_7))}); return scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())); } diff --git a/source/extensions/filters/http/dynamo/dynamo_stats.h b/source/extensions/filters/http/dynamo/dynamo_stats.h index 40837a95f0e9..de5fab8ff523 100644 --- a/source/extensions/filters/http/dynamo/dynamo_stats.h +++ b/source/extensions/filters/http/dynamo/dynamo_stats.h @@ -37,7 +37,10 @@ class DynamoStats { * TODO(jmarantz): Potential perf issue here with mutex contention for names * that have not been remembered as builtins in the constructor. 
*/ - Stats::StatName getStatName(const std::string& str) { return stat_name_set_.getStatName(str); } + Stats::StatName getDynamic(const std::string& str) { return stat_name_set_.getDynamic(str); } + Stats::StatName getBuiltin(const std::string& str, Stats::StatName fallback) { + return stat_name_set_.getBuiltin(str, fallback); + } private: Stats::SymbolTable::StoragePtr addPrefix(const Stats::StatNameVec& names); @@ -61,6 +64,8 @@ class DynamoStats { const Stats::StatName upstream_rq_time_; const Stats::StatName upstream_rq_total_; const Stats::StatName upstream_rq_unknown_; + const Stats::StatName unknown_entity_type_; + const Stats::StatName unknown_operation_; // Keep group codes for HTTP status codes through the 500s. static constexpr size_t NumGroupEntries = 6; diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index fb7ba3895e4e..6ff83d5ea8df 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -34,7 +34,7 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( callback = [filter_config, client_config, &context](Http::FilterChainFactoryCallbacks& callbacks) { auto client = std::make_unique( - context.clusterManager(), client_config); + context.clusterManager(), client_config, context.timeSource()); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 0fb09f1c95f1..65d1211f6623 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -86,7 +86,7 @@ FaultFilterConfig::FaultFilterConfig(const envoy::config::filter::http::fault::v void FaultFilterConfig::incCounter(absl::string_view downstream_cluster, Stats::StatName stat_name) { 
Stats::SymbolTable::StoragePtr storage = scope_.symbolTable().join( - {stats_prefix_, stat_name_set_.getStatName(downstream_cluster), stat_name}); + {stats_prefix_, stat_name_set_.getDynamic(downstream_cluster), stat_name}); scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); } diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 0e7f8caba9a8..6b9960218fa2 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -109,9 +109,13 @@ JsonTranscoderConfig::JsonTranscoderConfig( } for (const auto& file : descriptor_set.file()) { - if (descriptor_pool_.BuildFile(file) == nullptr) { - throw EnvoyException("transcoding_filter: Unable to build proto descriptor pool"); - } + addFileDescriptor(file); + } + + convert_grpc_status_ = proto_config.convert_grpc_status(); + if (convert_grpc_status_) { + addBuiltinSymbolDescriptor("google.protobuf.Any"); + addBuiltinSymbolDescriptor("google.rpc.Status"); } PathMatcherBuilder pmb; @@ -162,10 +166,34 @@ JsonTranscoderConfig::JsonTranscoderConfig( ignore_unknown_query_parameters_ = proto_config.ignore_unknown_query_parameters(); } +void JsonTranscoderConfig::addFileDescriptor(const Protobuf::FileDescriptorProto& file) { + if (descriptor_pool_.BuildFile(file) == nullptr) { + throw EnvoyException("transcoding_filter: Unable to build proto descriptor pool"); + } +} + +void JsonTranscoderConfig::addBuiltinSymbolDescriptor(const std::string& symbol_name) { + if (descriptor_pool_.FindFileContainingSymbol(symbol_name) != nullptr) { + return; + } + + auto* builtin_pool = Protobuf::DescriptorPool::generated_pool(); + if (!builtin_pool) { + return; + } + + Protobuf::DescriptorPoolDatabase pool_database(*builtin_pool); + Protobuf::FileDescriptorProto file_proto; + 
pool_database.FindFileContainingSymbol(symbol_name, &file_proto); + addFileDescriptor(file_proto); +} + bool JsonTranscoderConfig::matchIncomingRequestInfo() const { return match_incoming_request_route_; } +bool JsonTranscoderConfig::convertGrpcStatus() const { return convert_grpc_status_; } + ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( const Http::HeaderMap& headers, ZeroCopyInputStream& request_input, google::grpc::transcoding::TranscoderInputStream& response_input, @@ -242,6 +270,14 @@ JsonTranscoderConfig::methodToRequestInfo(const Protobuf::MethodDescriptor* meth return ProtobufUtil::Status(); } +ProtobufUtil::Status +JsonTranscoderConfig::translateProtoMessageToJson(const Protobuf::Message& message, + std::string* json_out) { + return ProtobufUtil::BinaryToJsonString( + type_helper_->Resolver(), Grpc::Common::typeUrl(message.GetDescriptor()->full_name()), + message.SerializeAsString(), json_out, print_options_); +} + JsonTranscoderFilter::JsonTranscoderFilter(JsonTranscoderConfig& config) : config_(config) {} Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::HeaderMap& headers, @@ -364,9 +400,17 @@ Http::FilterHeadersStatus JsonTranscoderFilter::encodeHeaders(Http::HeaderMap& h response_headers_ = &headers; if (end_stream) { + + if (method_->server_streaming()) { + // When there is no body in a streaming response, a empty JSON array is + // returned by default. Set the content type correctly. + headers.insertContentType().value().setReference(Http::Headers::get().ContentTypeValues.Json); + } + // In gRPC wire protocol, headers frame with end_stream is a trailers-only response. // The return value from encodeTrailers is ignored since it is always continue. 
encodeTrailers(headers); + return Http::FilterHeadersStatus::Continue; } @@ -383,6 +427,8 @@ Http::FilterDataStatus JsonTranscoderFilter::encodeData(Buffer::Instance& data, return Http::FilterDataStatus::Continue; } + has_body_ = true; + // TODO(dio): Add support for streaming case. if (has_http_body_output_) { buildResponseFromHttpBodyOutput(*response_headers_, data); @@ -418,6 +464,7 @@ Http::FilterTrailersStatus JsonTranscoderFilter::encodeTrailers(Http::HeaderMap& if (data.length()) { encoder_callbacks_->addEncodedData(data, true); + has_body_ = true; } if (method_->server_streaming()) { @@ -425,18 +472,34 @@ Http::FilterTrailersStatus JsonTranscoderFilter::encodeTrailers(Http::HeaderMap& return Http::FilterTrailersStatus::Continue; } + // If there was no previous headers frame, this |trailers| map is our |response_headers_|, + // so there is no need to copy headers from one to the other. + bool is_trailers_only_response = response_headers_ == &trailers; + const absl::optional grpc_status = Grpc::Common::getGrpcStatus(trailers); + bool status_converted_to_json = grpc_status && maybeConvertGrpcStatus(*grpc_status, trailers); + if (!grpc_status || grpc_status.value() == Grpc::Status::GrpcStatus::InvalidCode) { response_headers_->Status()->value(enumToInt(Http::Code::ServiceUnavailable)); } else { response_headers_->Status()->value(Grpc::Utility::grpcToHttpStatus(grpc_status.value())); - response_headers_->insertGrpcStatus().value(enumToInt(grpc_status.value())); + if (!status_converted_to_json && !is_trailers_only_response) { + response_headers_->insertGrpcStatus().value(enumToInt(grpc_status.value())); + } } - const Http::HeaderEntry* grpc_message_header = trailers.GrpcMessage(); - if (grpc_message_header) { - response_headers_->insertGrpcMessage().value(*grpc_message_header); + if (status_converted_to_json && is_trailers_only_response) { + // Drop the gRPC status headers, we already have them in the JSON body. 
+ response_headers_->removeGrpcStatus(); + response_headers_->removeGrpcMessage(); + response_headers_->remove(Http::Headers::get().GrpcStatusDetailsBin); + } else if (!status_converted_to_json && !is_trailers_only_response) { + // Copy the grpc-message header if it exists. + const Http::HeaderEntry* grpc_message_header = trailers.GrpcMessage(); + if (grpc_message_header) { + response_headers_->insertGrpcMessage().value(*grpc_message_header); + } } // remove Trailer headers if the client connection was http/1 @@ -494,6 +557,56 @@ void JsonTranscoderFilter::buildResponseFromHttpBodyOutput(Http::HeaderMap& resp } } +bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status, + Http::HeaderMap& trailers) { + if (!config_.convertGrpcStatus()) { + return false; + } + + // We do not support responses with a separate trailer frame. + // TODO(ascheglov): remove this if after HCM can buffer data added from |encodeTrailers|. + if (response_headers_ != &trailers) { + return false; + } + + // Send a serialized status only if there was no body. + if (has_body_) { + return false; + } + + if (grpc_status == Grpc::Status::GrpcStatus::Ok || + grpc_status == Grpc::Status::GrpcStatus::InvalidCode) { + return false; + } + + auto status_details = Grpc::Common::getGrpcStatusDetailsBin(trailers); + if (!status_details) { + // If no rpc.Status object was sent in the grpc-status-details-bin header, + // construct it from the grpc-status and grpc-message headers. 
+ status_details.emplace(); + status_details->set_code(grpc_status); + + auto grpc_message_header = trailers.GrpcMessage(); + if (grpc_message_header) { + auto message = grpc_message_header->value().getStringView(); + status_details->set_message(message.data(), message.size()); + } + } + + std::string json_status; + auto translate_status = config_.translateProtoMessageToJson(*status_details, &json_status); + if (!translate_status.ok()) { + ENVOY_LOG(debug, "Transcoding status error {}", translate_status.ToString()); + return false; + } + + response_headers_->insertContentType().value().setReference( + Http::Headers::get().ContentTypeValues.Json); + Buffer::OwnedImpl status_data(json_status); + encoder_callbacks_->addEncodedData(status_data, false); + return true; +} + bool JsonTranscoderFilter::hasHttpBodyAsOutputType() { return method_->output_type()->full_name() == google::api::HttpBody::descriptor()->full_name(); } diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index 21976ef9b760..a3bae5e4e027 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -68,6 +68,12 @@ class JsonTranscoderConfig : public Logger::Loggable { std::unique_ptr& transcoder, const Protobuf::MethodDescriptor*& method_descriptor); + /** + * Converts an arbitrary protobuf message to JSON. + */ + ProtobufUtil::Status translateProtoMessageToJson(const Protobuf::Message& message, + std::string* json_out); + /** * If true, skip clearing the route cache after the incoming request has been modified. 
* This allows Envoy to select the upstream cluster based on the incoming request @@ -75,6 +81,12 @@ class JsonTranscoderConfig : public Logger::Loggable { */ bool matchIncomingRequestInfo() const; + /** + * If true, when trailer indicates a gRPC error and there was no HTTP body, + * make google.rpc.Status out of gRPC status headers and use it as JSON body. + */ + bool convertGrpcStatus() const; + private: /** * Convert method descriptor to RequestInfo that needed for transcoding library @@ -83,6 +95,9 @@ class JsonTranscoderConfig : public Logger::Loggable { google::grpc::transcoding::RequestInfo* info); private: + void addFileDescriptor(const Protobuf::FileDescriptorProto& file); + void addBuiltinSymbolDescriptor(const std::string& symbol_name); + Protobuf::DescriptorPool descriptor_pool_; google::grpc::transcoding::PathMatcherPtr path_matcher_; std::unique_ptr type_helper_; @@ -90,6 +105,7 @@ class JsonTranscoderConfig : public Logger::Loggable { bool match_incoming_request_route_{false}; bool ignore_unknown_query_parameters_{false}; + bool convert_grpc_status_{false}; }; using JsonTranscoderConfigSharedPtr = std::shared_ptr; @@ -125,6 +141,7 @@ class JsonTranscoderFilter : public Http::StreamFilter, public Logger::Loggable< private: bool readToBuffer(Protobuf::io::ZeroCopyInputStream& stream, Buffer::Instance& data); void buildResponseFromHttpBodyOutput(Http::HeaderMap& response_headers, Buffer::Instance& data); + bool maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status, Http::HeaderMap& trailers); bool hasHttpBodyAsOutputType(); JsonTranscoderConfig& config_; @@ -139,6 +156,7 @@ class JsonTranscoderFilter : public Http::StreamFilter, public Logger::Loggable< bool error_{false}; bool has_http_body_output_{false}; + bool has_body_{false}; }; } // namespace GrpcJsonTranscoder diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc index d3b2549a87f7..3d739e8f7933 100644 
--- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc +++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.cc @@ -52,7 +52,7 @@ IpTaggingFilterConfig::IpTaggingFilterConfig( void IpTaggingFilterConfig::incCounter(Stats::StatName name, absl::string_view tag) { Stats::SymbolTable::StoragePtr storage = - scope_.symbolTable().join({stats_prefix_, stat_name_set_.getStatName(tag), name}); + scope_.symbolTable().join({stats_prefix_, stat_name_set_.getDynamic(tag), name}); scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); } diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index 4a31065b71f8..ce1893f1bb24 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -41,45 +41,96 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { return Network::FilterStatus::Continue; } - ASSERT(file_event_ == nullptr); - - file_event_ = cb.dispatcher().createFileEvent( - socket.ioHandle().fd(), - [this](uint32_t events) { - ASSERT(events == Event::FileReadyType::Read); - onRead(); - }, - Event::FileTriggerType::Edge, Event::FileReadyType::Read); - cb_ = &cb; - return Network::FilterStatus::StopIteration; + const ParseState parse_state = onRead(); + switch (parse_state) { + case ParseState::Error: + // As per discussion in https://github.com/envoyproxy/envoy/issues/7864 + // we don't add new enum in FilterStatus so we have to signal the caller + // the new condition. 
+ cb.socket().close(); + return Network::FilterStatus::StopIteration; + case ParseState::Done: + return Network::FilterStatus::Continue; + case ParseState::Continue: + // do nothing but create the event + ASSERT(file_event_ == nullptr); + file_event_ = cb.dispatcher().createFileEvent( + socket.ioHandle().fd(), + [this](uint32_t events) { + ENVOY_LOG(trace, "http inspector event: {}", events); + // inspector is always peeking and can never determine EOF. + // Use this event type to avoid listener timeout on the OS supporting + // FileReadyType::Closed. + bool end_stream = events & Event::FileReadyType::Closed; + + const ParseState parse_state = onRead(); + switch (parse_state) { + case ParseState::Error: + file_event_.reset(); + cb_->continueFilterChain(false); + break; + case ParseState::Done: + file_event_.reset(); + // Do not skip following listener filters. + cb_->continueFilterChain(true); + break; + case ParseState::Continue: + if (end_stream) { + // Parser fails to determine http but the end of stream is reached. Fallback to + // non-http. 
+ done(false); + file_event_.reset(); + cb_->continueFilterChain(true); + } + // do nothing but wait for the next event + break; + } + }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Closed); + return Network::FilterStatus::StopIteration; + } + NOT_REACHED_GCOVR_EXCL_LINE } -void Filter::onRead() { +ParseState Filter::onRead() { auto& os_syscalls = Api::OsSysCallsSingleton::get(); const Network::ConnectionSocket& socket = cb_->socket(); const Api::SysCallSizeResult result = os_syscalls.recv(socket.ioHandle().fd(), buf_, Config::MAX_INSPECT_SIZE, MSG_PEEK); ENVOY_LOG(trace, "http inspector: recv: {}", result.rc_); if (result.rc_ == -1 && result.errno_ == EAGAIN) { - return; + return ParseState::Continue; } else if (result.rc_ < 0) { config_->stats().read_error_.inc(); - return done(false); + return ParseState::Error; } - parseHttpHeader(absl::string_view(reinterpret_cast(buf_), result.rc_)); + const auto parse_state = + parseHttpHeader(absl::string_view(reinterpret_cast(buf_), result.rc_)); + switch (parse_state) { + case ParseState::Continue: + // do nothing but wait for the next event + return ParseState::Continue; + case ParseState::Error: + done(false); + return ParseState::Done; + case ParseState::Done: + done(true); + return ParseState::Done; + } + NOT_REACHED_GCOVR_EXCL_LINE } -void Filter::parseHttpHeader(absl::string_view data) { +ParseState Filter::parseHttpHeader(absl::string_view data) { const size_t len = std::min(data.length(), Filter::HTTP2_CONNECTION_PREFACE.length()); if (Filter::HTTP2_CONNECTION_PREFACE.compare(0, len, data, 0, len) == 0) { if (data.length() < Filter::HTTP2_CONNECTION_PREFACE.length()) { - return; + return ParseState::Continue; } ENVOY_LOG(trace, "http inspector: http2 connection preface found"); protocol_ = "HTTP/2"; - done(true); + return ParseState::Done; } else { const size_t pos = data.find_first_of("\r\n"); if (pos != absl::string_view::npos) { @@ -90,20 +141,25 @@ void 
Filter::parseHttpHeader(absl::string_view data) { // Method SP Request-URI SP HTTP-Version if (fields.size() != 3) { ENVOY_LOG(trace, "http inspector: invalid http1x request line"); - return done(false); + // done(false); + return ParseState::Error; } if (http1xMethods().count(fields[0]) == 0 || httpProtocols().count(fields[2]) == 0) { ENVOY_LOG(trace, "http inspector: method: {} or protocol: {} not valid", fields[0], fields[2]); - return done(false); + // done(false); + return ParseState::Error; } ENVOY_LOG(trace, "http inspector: method: {}, request uri: {}, protocol: {}", fields[0], fields[1], fields[2]); protocol_ = fields[2]; - return done(true); + // done(true); + return ParseState::Done; + } else { + return ParseState::Continue; } } } @@ -122,17 +178,15 @@ void Filter::done(bool success) { } else { ASSERT(protocol_ == "HTTP/2"); config_->stats().http2_found_.inc(); - protocol = "h2"; + // h2 HTTP/2 over TLS, h2c HTTP/2 over TCP + // TODO(yxue): use detected protocol from http inspector and support h2c token in HCM + protocol = "h2c"; } cb_->socket().setRequestedApplicationProtocols({protocol}); } else { config_->stats().http_not_found_.inc(); } - - file_event_.reset(); - // Do not skip following listener filters. - cb_->continueFilterChain(true); } const absl::flat_hash_set& Filter::httpProtocols() const { diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.h b/source/extensions/filters/listener/http_inspector/http_inspector.h index affd419ca6e2..4c94a9428719 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.h +++ b/source/extensions/filters/listener/http_inspector/http_inspector.h @@ -32,6 +32,15 @@ struct HttpInspectorStats { ALL_HTTP_INSPECTOR_STATS(GENERATE_COUNTER_STRUCT) }; +enum class ParseState { + // Parse result is out. It could be http family or empty. + Done, + // Parser expects more data. + Continue, + // Parser reports unrecoverable error. 
+ Error +}; + /** * Global configuration for http inspector. */ @@ -62,9 +71,9 @@ class Filter : public Network::ListenerFilter, Logger::Loggable& httpProtocols() const; const absl::flat_hash_set& http1xMethods() const; diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 4495f38cf722..a41ce47192d1 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -77,7 +77,9 @@ void ClientSslAuthConfig::parseResponse(const Http::Message& message) { stats_.total_principals_.set(new_principals->size()); } -void ClientSslAuthConfig::onFetchFailure(const EnvoyException*) { stats_.update_failure_.inc(); } +void ClientSslAuthConfig::onFetchFailure(Config::ConfigUpdateFailureReason, const EnvoyException*) { + stats_.update_failure_.inc(); +} static const std::string Path = "/v1/certs/list/approved"; diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h index 54ad916fe8dd..ca521a9a5a40 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h @@ -6,6 +6,7 @@ #include #include "envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.pb.h" +#include "envoy/config/subscription.h" #include "envoy/network/filter.h" #include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" @@ -94,7 +95,7 @@ class ClientSslAuthConfig : public Http::RestApiFetcher { void createRequest(Http::Message& request) override; void parseResponse(const Http::Message& response) override; void onFetchComplete() override {} - void onFetchFailure(const EnvoyException* e) override; + void onFetchFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; ThreadLocal::SlotPtr tls_; 
Network::Address::IpList ip_white_list_; diff --git a/source/extensions/filters/network/common/redis/redis_command_stats.cc b/source/extensions/filters/network/common/redis/redis_command_stats.cc index ce11df704bac..6eb6967c7644 100644 --- a/source/extensions/filters/network/common/redis/redis_command_stats.cc +++ b/source/extensions/filters/network/common/redis/redis_command_stats.cc @@ -9,34 +9,25 @@ namespace Common { namespace Redis { RedisCommandStats::RedisCommandStats(Stats::SymbolTable& symbol_table, const std::string& prefix) - : symbol_table_(symbol_table), stat_name_pool_(symbol_table_), - prefix_(stat_name_pool_.add(prefix)), - upstream_rq_time_(stat_name_pool_.add("upstream_rq_time")), - latency_(stat_name_pool_.add("latency")), total_(stat_name_pool_.add("total")), - success_(stat_name_pool_.add("success")), error_(stat_name_pool_.add("error")), - unused_metric_(stat_name_pool_.add("unused")), null_metric_(stat_name_pool_.add("null")), - unknown_metric_(stat_name_pool_.add("unknown")) { + : symbol_table_(symbol_table), stat_name_set_(symbol_table_), + prefix_(stat_name_set_.add(prefix)), + upstream_rq_time_(stat_name_set_.add("upstream_rq_time")), + latency_(stat_name_set_.add("latency")), total_(stat_name_set_.add("total")), + success_(stat_name_set_.add("success")), error_(stat_name_set_.add("error")), + unused_metric_(stat_name_set_.add("unused")), null_metric_(stat_name_set_.add("null")), + unknown_metric_(stat_name_set_.add("unknown")) { // Note: Even if this is disabled, we track the upstream_rq_time. // Create StatName for each Redis command. Note that we don't include Auth or Ping. 
- for (const std::string& command : - Extensions::NetworkFilters::Common::Redis::SupportedCommands::simpleCommands()) { - addCommandToPool(command); - } - for (const std::string& command : - Extensions::NetworkFilters::Common::Redis::SupportedCommands::evalCommands()) { - addCommandToPool(command); - } - for (const std::string& command : Extensions::NetworkFilters::Common::Redis::SupportedCommands:: - hashMultipleSumResultCommands()) { - addCommandToPool(command); - } - addCommandToPool(Extensions::NetworkFilters::Common::Redis::SupportedCommands::mget()); - addCommandToPool(Extensions::NetworkFilters::Common::Redis::SupportedCommands::mset()); -} - -void RedisCommandStats::addCommandToPool(const std::string& command_string) { - Stats::StatName command = stat_name_pool_.add(command_string); - stat_name_map_[command_string] = command; + stat_name_set_.rememberBuiltins( + Extensions::NetworkFilters::Common::Redis::SupportedCommands::simpleCommands()); + stat_name_set_.rememberBuiltins( + Extensions::NetworkFilters::Common::Redis::SupportedCommands::evalCommands()); + stat_name_set_.rememberBuiltins(Extensions::NetworkFilters::Common::Redis::SupportedCommands:: + hashMultipleSumResultCommands()); + stat_name_set_.rememberBuiltin( + Extensions::NetworkFilters::Common::Redis::SupportedCommands::mget()); + stat_name_set_.rememberBuiltin( + Extensions::NetworkFilters::Common::Redis::SupportedCommands::mset()); } Stats::Counter& RedisCommandStats::counter(Stats::Scope& scope, @@ -76,17 +67,9 @@ Stats::StatName RedisCommandStats::getCommandFromRequest(const RespValue& reques case RespType::Null: return null_metric_; default: - // Once we have a RespType::String we lowercase it and then look it up in our stat_name_map. - // If it does not exist, we return our unknown stat name. 
std::string to_lower_command(request.asString()); to_lower_table_.toLowerCase(to_lower_command); - - auto iter = stat_name_map_.find(to_lower_command); - if (iter != stat_name_map_.end()) { - return iter->second; - } else { - return unknown_metric_; - } + return stat_name_set_.getBuiltin(to_lower_command, unknown_metric_); } } @@ -107,4 +90,4 @@ void RedisCommandStats::updateStats(Stats::Scope& scope, Stats::StatName command } // namespace Common } // namespace NetworkFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/filters/network/common/redis/redis_command_stats.h b/source/extensions/filters/network/common/redis/redis_command_stats.h index 0ee2ce824c48..b4b8c87a29db 100644 --- a/source/extensions/filters/network/common/redis/redis_command_stats.h +++ b/source/extensions/filters/network/common/redis/redis_command_stats.h @@ -41,11 +41,8 @@ class RedisCommandStats { Stats::StatName getUnusedStatName() { return unused_metric_; } private: - void addCommandToPool(const std::string& command_string); - Stats::SymbolTable& symbol_table_; - Stats::StatNamePool stat_name_pool_; - StringMap stat_name_map_; + Stats::StatNameSet stat_name_set_; const Stats::StatName prefix_; const Stats::StatName upstream_rq_time_; const Stats::StatName latency_; diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc index 11dd1877cefc..f82ca1a41f11 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc @@ -21,7 +21,13 @@ MongoStats::MongoStats(Stats::Scope& scope, const std::string& prefix) reply_size_(stat_name_set_.add("reply_size")), reply_time_ms_(stat_name_set_.add("reply_time_ms")), time_ms_(stat_name_set_.add("time_ms")), query_(stat_name_set_.add("query")), scatter_get_(stat_name_set_.add("scatter_get")), - 
total_(stat_name_set_.add("total")) {} + total_(stat_name_set_.add("total")), unknown_command_(stat_name_set_.add("unknown_command")) { + + // TODO(jmarantz): is this the right set of mongo commands to use as builtins? + // Should we also have builtins for callsites or collections, or do those need + // to be dynamic? + stat_name_set_.rememberBuiltins({"insert", "query", "update", "delete"}); +} Stats::SymbolTable::StoragePtr MongoStats::addPrefix(const std::vector& names) { std::vector names_with_prefix; diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.h b/source/extensions/filters/network/mongo_proxy/mongo_stats.h index d27a8478824a..e4cb0426ba00 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.h +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.h @@ -26,7 +26,11 @@ class MongoStats { * TODO(jmarantz): Potential perf issue here with mutex contention for names * that have not been remembered as builtins in the constructor. */ - Stats::StatName getStatName(const std::string& str) { return stat_name_set_.getStatName(str); } + Stats::StatName getBuiltin(const std::string& str, Stats::StatName fallback) { + return stat_name_set_.getBuiltin(str, fallback); + } + + Stats::StatName getDynamic(const std::string& str) { return stat_name_set_.getDynamic(str); } private: Stats::SymbolTable::StoragePtr addPrefix(const std::vector& names); @@ -47,6 +51,7 @@ class MongoStats { const Stats::StatName query_; const Stats::StatName scatter_get_; const Stats::StatName total_; + const Stats::StatName unknown_command_; }; using MongoStatsSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index bc35859113f8..81618c35880d 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -146,7 +146,8 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { if 
(!active_query->query_info_.command().empty()) { // First field key is the operation. mongo_stats_->incCounter({mongo_stats_->cmd_, - mongo_stats_->getStatName(active_query->query_info_.command()), + mongo_stats_->getBuiltin(active_query->query_info_.command(), + mongo_stats_->unknown_command_), mongo_stats_->total_}); } else { // Normal query, get stats on a per collection basis first. @@ -154,13 +155,13 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { Stats::StatNameVec names; names.reserve(6); // 2 entries are added by chargeQueryStats(). names.push_back(mongo_stats_->collection_); - names.push_back(mongo_stats_->getStatName(active_query->query_info_.collection())); + names.push_back(mongo_stats_->getDynamic(active_query->query_info_.collection())); chargeQueryStats(names, query_type); // Callsite stats if we have it. if (!active_query->query_info_.callsite().empty()) { names.push_back(mongo_stats_->callsite_); - names.push_back(mongo_stats_->getStatName(active_query->query_info_.callsite())); + names.push_back(mongo_stats_->getDynamic(active_query->query_info_.callsite())); chargeQueryStats(names, query_type); } @@ -223,12 +224,13 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { if (!active_query.query_info_.command().empty()) { Stats::StatNameVec names{mongo_stats_->cmd_, - mongo_stats_->getStatName(active_query.query_info_.command())}; + mongo_stats_->getBuiltin(active_query.query_info_.command(), + mongo_stats_->unknown_command_)}; chargeReplyStats(active_query, names, *message); } else { // Collection stats first. Stats::StatNameVec names{mongo_stats_->collection_, - mongo_stats_->getStatName(active_query.query_info_.collection()), + mongo_stats_->getDynamic(active_query.query_info_.collection()), mongo_stats_->query_}; chargeReplyStats(active_query, names, *message); @@ -238,7 +240,7 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { // to mutate the array to {"collection", collection, "callsite", callsite, "query"}. 
ASSERT(names.size() == 3); names.back() = mongo_stats_->callsite_; // Replaces "query". - names.push_back(mongo_stats_->getStatName(active_query.query_info_.callsite())); + names.push_back(mongo_stats_->getDynamic(active_query.query_info_.callsite())); names.push_back(mongo_stats_->query_); chargeReplyStats(active_query, names, *message); } diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.cc b/source/extensions/filters/network/zookeeper_proxy/filter.cc index c2f4d00dc8af..2fee4392cc5b 100644 --- a/source/extensions/filters/network/zookeeper_proxy/filter.cc +++ b/source/extensions/filters/network/zookeeper_proxy/filter.cc @@ -21,16 +21,48 @@ ZooKeeperFilterConfig::ZooKeeperFilterConfig(const std::string& stat_prefix, : scope_(scope), max_packet_bytes_(max_packet_bytes), stats_(generateStats(stat_prefix, scope)), stat_name_set_(scope.symbolTable()), stat_prefix_(stat_name_set_.add(stat_prefix)), auth_(stat_name_set_.add("auth")), - connect_latency_(stat_name_set_.add("connect_response_latency")) { + connect_latency_(stat_name_set_.add("connect_response_latency")), + unknown_scheme_rq_(stat_name_set_.add("unknown_scheme_rq")), + unknown_opcode_latency_(stat_name_set_.add("unknown_opcode_latency")) { // https://zookeeper.apache.org/doc/r3.5.4-beta/zookeeperProgrammers.html#sc_BuiltinACLSchemes // lists commons schemes: "world", "auth", "digest", "host", "x509", and // "ip". These are used in filter.cc by appending "_rq". 
- stat_name_set_.rememberBuiltin("auth_rq"); - stat_name_set_.rememberBuiltin("digest_rq"); - stat_name_set_.rememberBuiltin("host_rq"); - stat_name_set_.rememberBuiltin("ip_rq"); - stat_name_set_.rememberBuiltin("world_rq"); - stat_name_set_.rememberBuiltin("x509_rq"); + stat_name_set_.rememberBuiltins( + {"auth_rq", "digest_rq", "host_rq", "ip_rq", "ping_response_rq", "world_rq", "x509_rq"}); + + initOpCode(OpCodes::PING, stats_.ping_resp_, "ping_response"); + initOpCode(OpCodes::SETAUTH, stats_.auth_resp_, "auth_response"); + initOpCode(OpCodes::GETDATA, stats_.getdata_resp_, "getdata_resp"); + initOpCode(OpCodes::CREATE, stats_.create_resp_, "create_resp"); + initOpCode(OpCodes::CREATE2, stats_.create2_resp_, "create2_resp"); + initOpCode(OpCodes::CREATECONTAINER, stats_.createcontainer_resp_, "createcontainer_resp"); + initOpCode(OpCodes::CREATETTL, stats_.createttl_resp_, "createttl_resp"); + initOpCode(OpCodes::SETDATA, stats_.setdata_resp_, "setdata_resp"); + initOpCode(OpCodes::GETCHILDREN, stats_.getchildren_resp_, "getchildren_resp"); + initOpCode(OpCodes::GETCHILDREN2, stats_.getchildren2_resp_, "getchildren2_resp"); + initOpCode(OpCodes::DELETE, stats_.delete_resp_, "delete_resp"); + initOpCode(OpCodes::EXISTS, stats_.exists_resp_, "exists_resp"); + initOpCode(OpCodes::GETACL, stats_.getacl_resp_, "getacl_resp"); + initOpCode(OpCodes::SETACL, stats_.setacl_resp_, "setacl_resp"); + initOpCode(OpCodes::SYNC, stats_.sync_resp_, "sync_resp"); + initOpCode(OpCodes::CHECK, stats_.check_resp_, "check_resp"); + initOpCode(OpCodes::MULTI, stats_.multi_resp_, "multi_resp"); + initOpCode(OpCodes::RECONFIG, stats_.reconfig_resp_, "reconfig_resp"); + initOpCode(OpCodes::SETWATCHES, stats_.setwatches_resp_, "setwatches_resp"); + initOpCode(OpCodes::CHECKWATCHES, stats_.checkwatches_resp_, "checkwatches_resp"); + initOpCode(OpCodes::REMOVEWATCHES, stats_.removewatches_resp_, "removewatches_resp"); + initOpCode(OpCodes::GETEPHEMERALS, stats_.getephemerals_resp_, 
"getephemerals_resp"); + initOpCode(OpCodes::GETALLCHILDRENNUMBER, stats_.getallchildrennumber_resp_, + "getallchildrennumber_resp"); + initOpCode(OpCodes::CLOSE, stats_.close_resp_, "close_resp"); +} + +void ZooKeeperFilterConfig::initOpCode(OpCodes opcode, Stats::Counter& counter, + absl::string_view name) { + OpCodeInfo& opcode_info = op_code_map_[opcode]; + opcode_info.counter_ = &counter; + opcode_info.opname_ = std::string(name); + opcode_info.latency_name_ = stat_name_set_.add(absl::StrCat(name, "_latency")); } ZooKeeperFilter::ZooKeeperFilter(ZooKeeperFilterConfigSharedPtr config, TimeSource& time_source) @@ -121,7 +153,8 @@ void ZooKeeperFilter::onPing() { void ZooKeeperFilter::onAuthRequest(const std::string& scheme) { Stats::SymbolTable::StoragePtr storage = config_->scope_.symbolTable().join( {config_->stat_prefix_, config_->auth_, - config_->stat_name_set_.getStatName(absl::StrCat(scheme, "_rq"))}); + config_->stat_name_set_.getBuiltin(absl::StrCat(scheme, "_rq"), + config_->unknown_scheme_rq_)}); config_->scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); setDynamicMetadata("opname", "auth"); } @@ -267,112 +300,17 @@ void ZooKeeperFilter::onConnectResponse(const int32_t proto_version, const int32 void ZooKeeperFilter::onResponse(const OpCodes opcode, const int32_t xid, const int64_t zxid, const int32_t error, const std::chrono::milliseconds& latency) { + Stats::StatName opcode_latency = config_->unknown_opcode_latency_; + auto iter = config_->op_code_map_.find(opcode); std::string opname = ""; - - switch (opcode) { - case OpCodes::PING: - config_->stats_.ping_resp_.inc(); - opname = "ping_response"; - break; - case OpCodes::SETAUTH: - config_->stats_.auth_resp_.inc(); - opname = "auth_response"; - break; - case OpCodes::GETDATA: - opname = "getdata_resp"; - config_->stats_.getdata_resp_.inc(); - break; - case OpCodes::CREATE: - opname = "create_resp"; - config_->stats_.create_resp_.inc(); - break; - case OpCodes::CREATE2: - opname = 
"create2_resp"; - config_->stats_.create2_resp_.inc(); - break; - case OpCodes::CREATECONTAINER: - opname = "createcontainer_resp"; - config_->stats_.createcontainer_resp_.inc(); - break; - case OpCodes::CREATETTL: - opname = "createttl_resp"; - config_->stats_.createttl_resp_.inc(); - break; - case OpCodes::SETDATA: - opname = "setdata_resp"; - config_->stats_.setdata_resp_.inc(); - break; - case OpCodes::GETCHILDREN: - opname = "getchildren_resp"; - config_->stats_.getchildren_resp_.inc(); - break; - case OpCodes::GETCHILDREN2: - opname = "getchildren2_resp"; - config_->stats_.getchildren2_resp_.inc(); - break; - case OpCodes::DELETE: - opname = "delete_resp"; - config_->stats_.delete_resp_.inc(); - break; - case OpCodes::EXISTS: - opname = "exists_resp"; - config_->stats_.exists_resp_.inc(); - break; - case OpCodes::GETACL: - config_->stats_.getacl_resp_.inc(); - opname = "getacl_resp"; - break; - case OpCodes::SETACL: - opname = "setacl_resp"; - config_->stats_.setacl_resp_.inc(); - break; - case OpCodes::SYNC: - opname = "sync_resp"; - config_->stats_.sync_resp_.inc(); - break; - case OpCodes::CHECK: - opname = "check_resp"; - config_->stats_.check_resp_.inc(); - break; - case OpCodes::MULTI: - opname = "multi_resp"; - config_->stats_.multi_resp_.inc(); - break; - case OpCodes::RECONFIG: - opname = "reconfig_resp"; - config_->stats_.reconfig_resp_.inc(); - break; - case OpCodes::SETWATCHES: - opname = "setwatches_resp"; - config_->stats_.setwatches_resp_.inc(); - break; - case OpCodes::CHECKWATCHES: - opname = "checkwatches_resp"; - config_->stats_.checkwatches_resp_.inc(); - break; - case OpCodes::REMOVEWATCHES: - opname = "removewatches_resp"; - config_->stats_.removewatches_resp_.inc(); - break; - case OpCodes::GETEPHEMERALS: - opname = "getephemerals_resp"; - config_->stats_.getephemerals_resp_.inc(); - break; - case OpCodes::GETALLCHILDRENNUMBER: - opname = "getallchildrennumber_resp"; - config_->stats_.getallchildrennumber_resp_.inc(); - break; - case 
OpCodes::CLOSE: - opname = "close_resp"; - config_->stats_.close_resp_.inc(); - break; - default: - break; + if (iter != config_->op_code_map_.end()) { + const ZooKeeperFilterConfig::OpCodeInfo& opcode_info = iter->second; + opcode_info.counter_->inc(); + opname = opcode_info.opname_; + opcode_latency = opcode_info.latency_name_; } - - Stats::SymbolTable::StoragePtr storage = config_->scope_.symbolTable().join( - {config_->stat_prefix_, - config_->stat_name_set_.getStatName(absl::StrCat(opname, "_latency"))}); + Stats::SymbolTable::StoragePtr storage = + config_->scope_.symbolTable().join({config_->stat_prefix_, opcode_latency}); config_->scope_.histogramFromStatName(Stats::StatName(storage.get())) .recordValue(latency.count()); diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.h b/source/extensions/filters/network/zookeeper_proxy/filter.h index 7836b7a5b765..14d6ca1b9a89 100644 --- a/source/extensions/filters/network/zookeeper_proxy/filter.h +++ b/source/extensions/filters/network/zookeeper_proxy/filter.h @@ -102,6 +102,17 @@ class ZooKeeperFilterConfig { const ZooKeeperProxyStats& stats() { return stats_; } uint32_t maxPacketBytes() const { return max_packet_bytes_; } + // Captures the counter used to track total op-code usage, as well as the + // StatName under which to collect the latency for that op-code. The + // latency-name will be joined with the stat_prefix_, which varies per filter + // instance. 
+ struct OpCodeInfo { + Stats::Counter* counter_; + std::string opname_; + Stats::StatName latency_name_; + }; + + absl::flat_hash_map op_code_map_; Stats::Scope& scope_; const uint32_t max_packet_bytes_; ZooKeeperProxyStats stats_; @@ -109,8 +120,12 @@ class ZooKeeperFilterConfig { const Stats::StatName stat_prefix_; const Stats::StatName auth_; const Stats::StatName connect_latency_; + const Stats::StatName unknown_scheme_rq_; + const Stats::StatName unknown_opcode_latency_; private: + void initOpCode(OpCodes opcode, Stats::Counter& counter, absl::string_view name); + ZooKeeperProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) { return ZooKeeperProxyStats{ALL_ZOOKEEPER_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; } diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index be1fa5ff80a0..3e059d44d795 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -234,3 +234,19 @@ envoy_cc_library( "@com_googlesource_quiche//:quic_core_http_header_list_lib", ], ) + +envoy_cc_library( + name = "quic_transport_socket_factory_lib", + srcs = ["quic_transport_socket_factory.cc"], + hdrs = ["quic_transport_socket_factory.h"], + tags = ["nofips"], + deps = [ + "//include/envoy/network:transport_socket_interface", + "//include/envoy/server:transport_socket_config_interface", + "//include/envoy/ssl:context_config_interface", + "//source/common/common:assert_lib", + "//source/extensions/transport_sockets:well_known_names", + "//source/extensions/transport_sockets/tls:context_config_lib", + "@envoy_api//envoy/api/v2/auth:cert_cc", + ], +) diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 5f6b5ff71a43..6272cc98df15 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ 
b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -11,29 +11,30 @@ namespace Envoy { namespace Quic { ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, - Network::ConnectionHandler& parent, spdlog::logger& logger, + Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config) : ActiveQuicListener(dispatcher, parent, - dispatcher.createUdpListener(listener_config.socket(), *this), logger, + dispatcher.createUdpListener(listener_config.socket(), *this), listener_config, quic_config) {} ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, - Network::UdpListenerPtr&& listener, spdlog::logger& logger, + Network::UdpListenerPtr&& listener, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config) : ActiveQuicListener(dispatcher, parent, std::make_unique(*listener), - std::move(listener), logger, listener_config, quic_config) {} + std::move(listener), listener_config, quic_config) {} ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, std::unique_ptr writer, - Network::UdpListenerPtr&& listener, spdlog::logger& logger, + Network::UdpListenerPtr&& listener, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config) - : Server::ConnectionHandlerImpl::ActiveListenerImplBase(std::move(listener), listener_config), - logger_(logger), dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()) { + : Server::ConnectionHandlerImpl::ActiveListenerImplBase(parent, std::move(listener), + listener_config), + dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()) { quic::QuicRandom* const random = quic::QuicRandom::GetInstance(); random->RandBytes(random_seed_, sizeof(random_seed_)); crypto_config_ = std::make_unique( @@ -51,7 +52,7 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, } void 
ActiveQuicListener::onListenerShutdown() { - ENVOY_LOG_TO_LOGGER(logger_, info, "Quic listener {} shutdown.", config_.name()); + ENVOY_LOG(info, "Quic listener {} shutdown.", config_.name()); quic_dispatcher_->Shutdown(); } diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index bb327d12fd60..89ef57d83727 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -22,15 +22,15 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, // filter. // TODO(danzh): clean up meaningless inheritance. public Network::UdpListenerFilterManager, - public Network::UdpReadFilterCallbacks { + public Network::UdpReadFilterCallbacks, + Logger::Loggable { public: ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, - spdlog::logger& logger, Network::ListenerConfig& listener_config, - const quic::QuicConfig& quic_config); + Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config); ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, - Network::UdpListenerPtr&& listener, spdlog::logger& logger, - Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config); + Network::UdpListenerPtr&& listener, Network::ListenerConfig& listener_config, + const quic::QuicConfig& quic_config); // TODO(#7465): Make this a callback. 
void onListenerShutdown(); @@ -57,12 +57,11 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, std::unique_ptr writer, - Network::UdpListenerPtr&& listener, spdlog::logger& logger, - Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config); + Network::UdpListenerPtr&& listener, Network::ListenerConfig& listener_config, + const quic::QuicConfig& quic_config); uint8_t random_seed_[16]; std::unique_ptr crypto_config_; - spdlog::logger& logger_; Event::Dispatcher& dispatcher_; quic::QuicVersionManager version_manager_; std::unique_ptr quic_dispatcher_; @@ -91,11 +90,13 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory { quic_config_.SetMaxIncomingUnidirectionalStreamsToSend(max_streams); } + // Network::ActiveUdpListenerFactory. Network::ConnectionHandler::ActiveListenerPtr createActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& disptacher, - spdlog::logger& logger, Network::ListenerConfig& config) const override { - return std::make_unique(disptacher, parent, logger, config, quic_config_); + Network::ListenerConfig& config) const override { + return std::make_unique(disptacher, parent, config, quic_config_); } + bool isTransportConnectionless() const override { return false; } private: friend class ActiveQuicListenerFactoryPeer; diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc new file mode 100644 index 000000000000..606739e7caeb --- /dev/null +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc @@ -0,0 +1,48 @@ +#include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" + +#include "envoy/api/v2/auth/cert.pb.h" +#include "envoy/api/v2/auth/cert.pb.validate.h" + +#include "extensions/transport_sockets/tls/context_config_impl.h" + +namespace 
Envoy { +namespace Quic { + +Network::TransportSocketFactoryPtr +QuicServerTransportSocketConfigFactory::createTransportSocketFactory( + const Protobuf::Message& config, Server::Configuration::TransportSocketFactoryContext& context, + const std::vector& /*server_names*/) { + auto server_config = std::make_unique( + MessageUtil::downcastAndValidate( + config, context.messageValidationVisitor()), + context); + return std::make_unique(std::move(server_config)); +} + +ProtobufTypes::MessagePtr QuicServerTransportSocketConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +Network::TransportSocketFactoryPtr +QuicClientTransportSocketConfigFactory::createTransportSocketFactory( + const Protobuf::Message& config, + Server::Configuration::TransportSocketFactoryContext& context) { + auto client_config = std::make_unique( + MessageUtil::downcastAndValidate( + config, context.messageValidationVisitor()), + context); + return std::make_unique(std::move(client_config)); +} + +ProtobufTypes::MessagePtr QuicClientTransportSocketConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +REGISTER_FACTORY(QuicServerTransportSocketConfigFactory, + Server::Configuration::DownstreamTransportSocketConfigFactory); + +REGISTER_FACTORY(QuicClientTransportSocketConfigFactory, + Server::Configuration::UpstreamTransportSocketConfigFactory); + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h new file mode 100644 index 000000000000..e254692df48e --- /dev/null +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h @@ -0,0 +1,94 @@ +#include "envoy/network/transport_socket.h" +#include "envoy/server/transport_socket_config.h" +#include "envoy/ssl/context_config.h" + +#include "common/common/assert.h" + +#include "extensions/transport_sockets/well_known_names.h" + +namespace Envoy { 
+namespace Quic { + +// Base class for QUIC transport socket factory. +// Because QUIC stack handles all L4 data, there is no need of a real transport +// socket for QUIC in current implementation. This factory doesn't provides a +// transport socket, instead, its derived class provides TLS context config for +// server and client. +class QuicTransportSocketFactoryBase : public Network::TransportSocketFactory { +public: + // Network::TransportSocketFactory + Network::TransportSocketPtr + createTransportSocket(Network::TransportSocketOptionsSharedPtr /*options*/) const override { + NOT_REACHED_GCOVR_EXCL_LINE; + } + bool implementsSecureTransport() const override { return true; } +}; + +// TODO(danzh): when implement ProofSource, examine of it's necessary to +// differentiate server and client side context config. +class QuicServerTransportSocketFactory : public QuicTransportSocketFactoryBase { +public: + QuicServerTransportSocketFactory(Ssl::ServerContextConfigPtr config) + : config_(std::move(config)) {} + + const Ssl::ServerContextConfig& serverContextConfig() const { return *config_; } + +private: + std::unique_ptr config_; +}; + +class QuicClientTransportSocketFactory : public QuicTransportSocketFactoryBase { +public: + QuicClientTransportSocketFactory(Envoy::Ssl::ClientContextConfigPtr config) + : config_(std::move(config)) {} + + const Ssl::ClientContextConfig& clientContextConfig() const { return *config_; } + +private: + std::unique_ptr config_; +}; + +// Base class to create above QuicTransportSocketFactory for server and client +// side. 
+class QuicTransportSocketConfigFactory + : public virtual Server::Configuration::TransportSocketConfigFactory { +public: + // Server::Configuration::TransportSocketConfigFactory + std::string name() const override { + return Extensions::TransportSockets::TransportSocketNames::get().Quic; + } +}; + +class QuicServerTransportSocketConfigFactory + : public QuicTransportSocketConfigFactory, + public Server::Configuration::DownstreamTransportSocketConfigFactory { +public: + // Server::Configuration::DownstreamTransportSocketConfigFactory + Network::TransportSocketFactoryPtr + createTransportSocketFactory(const Protobuf::Message& config, + Server::Configuration::TransportSocketFactoryContext& context, + const std::vector& server_names) override; + + // Server::Configuration::TransportSocketConfigFactory + ProtobufTypes::MessagePtr createEmptyConfigProto() override; +}; + +DECLARE_FACTORY(QuicServerTransportSocketConfigFactory); + +class QuicClientTransportSocketConfigFactory + : public QuicTransportSocketConfigFactory, + public Server::Configuration::UpstreamTransportSocketConfigFactory { +public: + // Server::Configuration::UpstreamTransportSocketConfigFactory + Network::TransportSocketFactoryPtr createTransportSocketFactory( + const Protobuf::Message& config, + Server::Configuration::TransportSocketFactoryContext& context) override; + + // Server::Configuration::TransportSocketConfigFactory + ProtobufTypes::MessagePtr createEmptyConfigProto() override; +}; + +DECLARE_FACTORY(QuicClientTransportSocketConfigFactory); + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/transport_sockets/alts/BUILD b/source/extensions/transport_sockets/alts/BUILD index 2f65e098c4b8..a13af2b2c3ea 100644 --- a/source/extensions/transport_sockets/alts/BUILD +++ b/source/extensions/transport_sockets/alts/BUILD @@ -38,6 +38,7 @@ envoy_cc_library( ":tsi_socket", "//include/envoy/registry", "//include/envoy/server:transport_socket_config_interface", + 
"//source/common/grpc:google_grpc_context_lib", "//source/extensions/transport_sockets:well_known_names", "@envoy_api//envoy/config/transport_socket/alts/v2alpha:alts_cc", ], diff --git a/source/extensions/transport_sockets/alts/config.cc b/source/extensions/transport_sockets/alts/config.cc index 712f69282078..65ca6297eb07 100644 --- a/source/extensions/transport_sockets/alts/config.cc +++ b/source/extensions/transport_sockets/alts/config.cc @@ -6,6 +6,7 @@ #include "envoy/server/transport_socket_config.h" #include "common/common/assert.h" +#include "common/grpc/google_grpc_context.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -65,6 +66,9 @@ class AltsSharedState : public Singleton::Instance { AltsSharedState() { grpc_alts_shared_resource_dedicated_init(); } ~AltsSharedState() override { grpc_alts_shared_resource_dedicated_shutdown(); } + +private: + Grpc::GoogleGrpcContext google_grpc_context_; }; SINGLETON_MANAGER_REGISTRATION(alts_shared_state); diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 85928172612e..873bcc7af087 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -52,6 +52,10 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c TimeSource& time_source) : scope_(scope), stats_(generateStats(scope)), time_source_(time_source), tls_max_version_(config.maxProtocolVersion()), stat_name_set_(scope.symbolTable()), + unknown_ssl_cipher_(stat_name_set_.add("unknown_ssl_cipher")), + unknown_ssl_curve_(stat_name_set_.add("unknown_ssl_curve")), + unknown_ssl_algorithm_(stat_name_set_.add("unknown_ssl_algorithm")), + unknown_ssl_version_(stat_name_set_.add("unknown_ssl_version")), ssl_ciphers_(stat_name_set_.add("ssl.ciphers")), ssl_versions_(stat_name_set_.add("ssl.versions")), ssl_curves_(stat_name_set_.add("ssl.curves")), @@ -389,19 
+393,18 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c stat_name_set_.rememberBuiltin("ECDHE-RSA-AES128-GCM-SHA256"); stat_name_set_.rememberBuiltin("ECDHE-RSA-AES128-SHA"); stat_name_set_.rememberBuiltin("ECDHE-RSA-CHACHA20-POLY1305"); + stat_name_set_.rememberBuiltin("TLS_AES_128_GCM_SHA256"); - // Curves - stat_name_set_.rememberBuiltin("X25519"); + // Curves from + // https://github.com/google/boringssl/blob/f4d8b969200f1ee2dd872ffb85802e6a0976afe7/ssl/ssl_key_share.cc#L384 + stat_name_set_.rememberBuiltins( + {"P-224", "P-256", "P-384", "P-521", "X25519", "CECPQ2", "CECPQ2b"}); // Algorithms - stat_name_set_.rememberBuiltin("ecdsa_secp256r1_sha256"); - stat_name_set_.rememberBuiltin("rsa_pss_rsae_sha256"); + stat_name_set_.rememberBuiltins({"ecdsa_secp256r1_sha256", "rsa_pss_rsae_sha256"}); // Versions - stat_name_set_.rememberBuiltin("TLSv1"); - stat_name_set_.rememberBuiltin("TLSv1.1"); - stat_name_set_.rememberBuiltin("TLSv1.2"); - stat_name_set_.rememberBuiltin("TLSv1.3"); + stat_name_set_.rememberBuiltins({"TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3"}); } int ServerContextImpl::alpnSelectCallback(const unsigned char** out, unsigned char* outlen, @@ -510,10 +513,11 @@ int ContextImpl::verifyCertificate(X509* cert, const std::vector& v return 1; } -void ContextImpl::incCounter(const Stats::StatName name, absl::string_view value) const { +void ContextImpl::incCounter(const Stats::StatName name, absl::string_view value, + const Stats::StatName fallback) const { Stats::SymbolTable& symbol_table = scope_.symbolTable(); Stats::SymbolTable::StoragePtr storage = - symbol_table.join({name, stat_name_set_.getStatName(value)}); + symbol_table.join({name, stat_name_set_.getBuiltin(value, fallback)}); scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); #ifdef LOG_BUILTIN_STAT_NAMES @@ -529,19 +533,18 @@ void ContextImpl::logHandshake(SSL* ssl) const { stats_.session_reused_.inc(); } - incCounter(ssl_ciphers_, 
SSL_get_cipher_name(ssl)); - incCounter(ssl_versions_, SSL_get_version(ssl)); + incCounter(ssl_ciphers_, SSL_get_cipher_name(ssl), unknown_ssl_cipher_); + incCounter(ssl_versions_, SSL_get_version(ssl), unknown_ssl_version_); uint16_t curve_id = SSL_get_curve_id(ssl); if (curve_id) { - // Note: in the unit tests, this curve name is always literal "X25519" - incCounter(ssl_curves_, SSL_get_curve_name(curve_id)); + incCounter(ssl_curves_, SSL_get_curve_name(curve_id), unknown_ssl_curve_); } uint16_t sigalg_id = SSL_get_peer_signature_algorithm(ssl); if (sigalg_id) { const char* sigalg = SSL_get_signature_algorithm_name(sigalg_id, 1 /* include curve */); - incCounter(ssl_sigalgs_, sigalg); + incCounter(ssl_sigalgs_, sigalg, unknown_ssl_algorithm_); } bssl::UniquePtr cert(SSL_get_peer_certificate(ssl)); diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h index a730187bcd54..af88c9cf0ba3 100644 --- a/source/extensions/transport_sockets/tls/context_impl.h +++ b/source/extensions/transport_sockets/tls/context_impl.h @@ -128,7 +128,8 @@ class ContextImpl : public virtual Envoy::Ssl::Context { static SslStats generateStats(Stats::Scope& scope); std::string getCaFileName() const { return ca_file_path_; }; - void incCounter(const Stats::StatName name, absl::string_view value) const; + void incCounter(const Stats::StatName name, absl::string_view value, + const Stats::StatName fallback) const; Envoy::Ssl::CertificateDetailsPtr certificateDetails(X509* cert, const std::string& path) const; @@ -171,6 +172,10 @@ class ContextImpl : public virtual Envoy::Ssl::Context { TimeSource& time_source_; const unsigned tls_max_version_; mutable Stats::StatNameSet stat_name_set_; + const Stats::StatName unknown_ssl_cipher_; + const Stats::StatName unknown_ssl_curve_; + const Stats::StatName unknown_ssl_algorithm_; + const Stats::StatName unknown_ssl_version_; const Stats::StatName ssl_ciphers_; const Stats::StatName 
ssl_versions_; const Stats::StatName ssl_curves_; diff --git a/source/server/active_raw_udp_listener_config.cc b/source/server/active_raw_udp_listener_config.cc index 0d6cbb196cf7..8eea3f4742e9 100644 --- a/source/server/active_raw_udp_listener_config.cc +++ b/source/server/active_raw_udp_listener_config.cc @@ -6,10 +6,11 @@ namespace Envoy { namespace Server { -Network::ConnectionHandler::ActiveListenerPtr ActiveRawUdpListenerFactory::createActiveUdpListener( - Network::ConnectionHandler& /*parent*/, Event::Dispatcher& dispatcher, - spdlog::logger& /*logger*/, Network::ListenerConfig& config) const { - return std::make_unique(dispatcher, config); +Network::ConnectionHandler::ActiveListenerPtr +ActiveRawUdpListenerFactory::createActiveUdpListener(Network::ConnectionHandler& parent, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) const { + return std::make_unique(parent, dispatcher, config); } ProtobufTypes::MessagePtr ActiveRawUdpListenerConfigFactory::createEmptyConfigProto() { diff --git a/source/server/active_raw_udp_listener_config.h b/source/server/active_raw_udp_listener_config.h index b58554c9c3fb..3a2477f27492 100644 --- a/source/server/active_raw_udp_listener_config.h +++ b/source/server/active_raw_udp_listener_config.h @@ -11,7 +11,9 @@ class ActiveRawUdpListenerFactory : public Network::ActiveUdpListenerFactory { public: Network::ConnectionHandler::ActiveListenerPtr createActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& disptacher, - spdlog::logger& logger, Network::ListenerConfig& config) const override; + Network::ListenerConfig& config) const override; + + bool isTransportConnectionless() const override { return true; } }; // This class uses a protobuf config to create a UDP listener factory which diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 8a59c4a046ff..d447c5822940 100644 --- a/source/server/config_validation/server.h +++ 
b/source/server/config_validation/server.h @@ -94,9 +94,7 @@ class ValidationInstance : Logger::Loggable, Stats::Store& stats() override { return stats_store_; } Grpc::Context& grpcContext() override { return grpc_context_; } Http::Context& httpContext() override { return http_context_; } - absl::optional> processContext() override { - return absl::nullopt; - } + OptProcessContextRef processContext() override { return absl::nullopt; } ThreadLocal::Instance& threadLocal() override { return thread_local_; } const LocalInfo::LocalInfo& localInfo() override { return *local_info_; } TimeSource& timeSource() override { return api_->timeSource(); } @@ -145,7 +143,7 @@ class ValidationInstance : Logger::Loggable, uint64_t nextListenerTag() override { return 0; } // Server::WorkerFactory - WorkerPtr createWorker(OverloadManager&) override { + WorkerPtr createWorker(OverloadManager&, const std::string&) override { // Returned workers are not currently used so we can return nothing here safely vs. a // validation mock. 
return nullptr; diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 398cb88fd71c..f21c1715821a 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -14,8 +14,10 @@ namespace Envoy { namespace Server { -ConnectionHandlerImpl::ConnectionHandlerImpl(spdlog::logger& logger, Event::Dispatcher& dispatcher) - : logger_(logger), dispatcher_(dispatcher), disable_listeners_(false) {} +ConnectionHandlerImpl::ConnectionHandlerImpl(Event::Dispatcher& dispatcher, + const std::string& per_handler_stat_prefix) + : dispatcher_(dispatcher), per_handler_stat_prefix_(per_handler_stat_prefix + "."), + disable_listeners_(false) {} void ConnectionHandlerImpl::incNumConnections() { ++num_connections_; } @@ -31,10 +33,8 @@ void ConnectionHandlerImpl::addListener(Network::ListenerConfig& config) { if (socket_type == Network::Address::SocketType::Stream) { listener = std::make_unique(*this, config); } else { - ASSERT(socket_type == Network::Address::SocketType::Datagram, - "Only datagram/stream listener supported"); - listener = - config.udpListenerFactory()->createActiveUdpListener(*this, dispatcher_, logger_, config); + ASSERT(config.udpListenerFactory() != nullptr, "UDP listener factory is not initialized."); + listener = config.udpListenerFactory()->createActiveUdpListener(*this, dispatcher_, config); } if (disable_listeners_) { @@ -82,8 +82,7 @@ void ConnectionHandlerImpl::enableListeners() { } void ConnectionHandlerImpl::ActiveTcpListener::removeConnection(ActiveConnection& connection) { - ENVOY_CONN_LOG_TO_LOGGER(parent_.logger_, debug, "adding to cleanup list", - *connection.connection_); + ENVOY_CONN_LOG(debug, "adding to cleanup list", *connection.connection_); ActiveConnectionPtr removed = connection.removeFromList(connections_); parent_.dispatcher_.deferredDelete(std::move(removed)); ASSERT(parent_.num_connections_ > 0); @@ -91,8 +90,15 @@ void 
ConnectionHandlerImpl::ActiveTcpListener::removeConnection(ActiveConnection } ConnectionHandlerImpl::ActiveListenerImplBase::ActiveListenerImplBase( - Network::ListenerPtr&& listener, Network::ListenerConfig& config) - : listener_(std::move(listener)), stats_(generateStats(config.listenerScope())), + Network::ConnectionHandler& parent, Network::ListenerPtr&& listener, + Network::ListenerConfig& config) + : listener_(std::move(listener)), + stats_({ALL_LISTENER_STATS(POOL_COUNTER(config.listenerScope()), + POOL_GAUGE(config.listenerScope()), + POOL_HISTOGRAM(config.listenerScope()))}), + per_worker_stats_({ALL_PER_HANDLER_LISTENER_STATS( + POOL_COUNTER_PREFIX(config.listenerScope(), parent.statPrefix()), + POOL_GAUGE_PREFIX(config.listenerScope(), parent.statPrefix()))}), listener_filters_timeout_(config.listenerFiltersTimeout()), continue_on_listener_filters_timeout_(config.continueOnListenerFiltersTimeout()), listener_tag_(config.listenerTag()), config_(config) {} @@ -108,7 +114,8 @@ ConnectionHandlerImpl::ActiveTcpListener::ActiveTcpListener(ConnectionHandlerImp ConnectionHandlerImpl::ActiveTcpListener::ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config) - : ConnectionHandlerImpl::ActiveListenerImplBase(std::move(listener), config), parent_(parent) {} + : ConnectionHandlerImpl::ActiveListenerImplBase(parent, std::move(listener), config), + parent_(parent) {} ConnectionHandlerImpl::ActiveTcpListener::~ActiveTcpListener() { // Purge sockets that have not progressed to connections. 
This should only happen when @@ -165,11 +172,11 @@ ConnectionHandlerImpl::findActiveListenerByAddress(const Network::Address::Insta void ConnectionHandlerImpl::ActiveSocket::onTimeout() { listener_.stats_.downstream_pre_cx_timeout_.inc(); ASSERT(inserted()); - ENVOY_LOG_TO_LOGGER(listener_.parent_.logger_, debug, "listener filter times out after {} ms", - listener_.listener_filters_timeout_.count()); + ENVOY_LOG(debug, "listener filter times out after {} ms", + listener_.listener_filters_timeout_.count()); if (listener_.continue_on_listener_filters_timeout_) { - ENVOY_LOG_TO_LOGGER(listener_.parent_.logger_, debug, "fallback to default listener filter"); + ENVOY_LOG(debug, "fallback to default listener filter"); newConnection(); } unlink(); @@ -279,8 +286,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( // Find matching filter chain. const auto filter_chain = config_.filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { - ENVOY_LOG_TO_LOGGER(parent_.logger_, debug, - "closing connection: no matching filter chain found"); + ENVOY_LOG(debug, "closing connection: no matching filter chain found"); stats_.no_filter_chain_match_.inc(); socket->close(); return; @@ -294,8 +300,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( const bool empty_filter_chain = !config_.filterChainFactory().createNetworkFilterChain( *new_connection, filter_chain->networkFilterFactories()); if (empty_filter_chain) { - ENVOY_CONN_LOG_TO_LOGGER(parent_.logger_, debug, "closing connection: no filters", - *new_connection); + ENVOY_CONN_LOG(debug, "closing connection: no filters", *new_connection); new_connection->close(Network::ConnectionCloseType::NoFlush); return; } @@ -305,7 +310,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( void ConnectionHandlerImpl::ActiveTcpListener::onNewConnection( Network::ConnectionPtr&& new_connection) { - ENVOY_CONN_LOG_TO_LOGGER(parent_.logger_, debug, "new connection", *new_connection); + 
ENVOY_CONN_LOG(debug, "new connection", *new_connection); // If the connection is already closed, we can just let this connection immediately die. if (new_connection->state() != Network::Connection::State::Closed) { @@ -327,27 +332,28 @@ ConnectionHandlerImpl::ActiveConnection::ActiveConnection(ActiveTcpListener& lis connection_->addConnectionCallbacks(*this); listener_.stats_.downstream_cx_total_.inc(); listener_.stats_.downstream_cx_active_.inc(); + listener_.per_worker_stats_.downstream_cx_total_.inc(); + listener_.per_worker_stats_.downstream_cx_active_.inc(); } ConnectionHandlerImpl::ActiveConnection::~ActiveConnection() { listener_.stats_.downstream_cx_active_.dec(); listener_.stats_.downstream_cx_destroy_.inc(); + listener_.per_worker_stats_.downstream_cx_active_.dec(); conn_length_->complete(); } -ListenerStats ConnectionHandlerImpl::generateStats(Stats::Scope& scope) { - return {ALL_LISTENER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; -} - -ActiveUdpListener::ActiveUdpListener(Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveUdpListener(dispatcher.createUdpListener(config.socket(), *this), config) {} +ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, + Event::Dispatcher& dispatcher, Network::ListenerConfig& config) + : ActiveUdpListener(parent, dispatcher.createUdpListener(config.socket(), *this), config) {} -ActiveUdpListener::ActiveUdpListener(Network::ListenerPtr&& listener, +ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, + Network::ListenerPtr&& listener, Network::ListenerConfig& config) - : ConnectionHandlerImpl::ActiveListenerImplBase(std::move(listener), config), - udp_listener_(dynamic_cast(listener_.get())), read_filter_(nullptr) { + : ConnectionHandlerImpl::ActiveListenerImplBase(parent, std::move(listener), config), + udp_listener_(*dynamic_cast(listener_.get())), read_filter_(nullptr) { // TODO(sumukhs): Try to avoid dynamic_cast by coming up 
with a better interface design - ASSERT(udp_listener_ != nullptr, ""); + ASSERT(dynamic_cast(listener_.get()) != nullptr, ""); // Create the filter chain on creating a new udp listener config_.filterChainFactory().createUdpListenerFilterChain(*this, *this); @@ -380,7 +386,7 @@ void ActiveUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter read_filter_ = std::move(filter); } -Network::UdpListener& ActiveUdpListener::udpListener() { return *udp_listener_; } +Network::UdpListener& ActiveUdpListener::udpListener() { return udp_listener_; } } // namespace Server } // namespace Envoy diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index ecf1946bf234..187d0252c2b0 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -23,12 +23,6 @@ #include "spdlog/spdlog.h" namespace Envoy { - -namespace Quic { -class ActiveQuicListener; -class EnvoyQuicDispatcher; -} // namespace Quic - namespace Server { #define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM) \ @@ -47,13 +41,26 @@ struct ListenerStats { ALL_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; +#define ALL_PER_HANDLER_LISTENER_STATS(COUNTER, GAUGE) \ + COUNTER(downstream_cx_total) \ + GAUGE(downstream_cx_active, Accumulate) + +/** + * Wrapper struct for per-handler listener stats. @see stats_macros.h + */ +struct PerHandlerListenerStats { + ALL_PER_HANDLER_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) +}; + /** * Server side connection handler. This is used both by workers as well as the * main thread for non-threaded listeners. 
*/ -class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { +class ConnectionHandlerImpl : public Network::ConnectionHandler, + NonCopyable, + Logger::Loggable { public: - ConnectionHandlerImpl(spdlog::logger& logger, Event::Dispatcher& dispatcher); + ConnectionHandlerImpl(Event::Dispatcher& dispatcher, const std::string& per_handler_stat_prefix); // Network::ConnectionHandler uint64_t numConnections() override { return num_connections_; } @@ -65,6 +72,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { void stopListeners() override; void disableListeners() override; void enableListeners() override; + const std::string& statPrefix() override { return per_handler_stat_prefix_; } Network::Listener* findListenerByAddress(const Network::Address::Instance& address) override; @@ -76,7 +84,8 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { */ class ActiveListenerImplBase : public Network::ConnectionHandler::ActiveListener { public: - ActiveListenerImplBase(Network::ListenerPtr&& listener, Network::ListenerConfig& config); + ActiveListenerImplBase(Network::ConnectionHandler& parent, Network::ListenerPtr&& listener, + Network::ListenerConfig& config); // Network::ConnectionHandler::ActiveListener. 
uint64_t listenerTag() override { return listener_tag_; } @@ -85,6 +94,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { Network::ListenerPtr listener_; ListenerStats stats_; + PerHandlerListenerStats per_worker_stats_; const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; const uint64_t listener_tag_; @@ -92,28 +102,19 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { }; private: - class ActiveUdpListener; - using ActiveUdpListenerPtr = std::unique_ptr; - class ActiveTcpListener; - using ActiveTcpListenerPtr = std::unique_ptr; struct ActiveConnection; using ActiveConnectionPtr = std::unique_ptr; struct ActiveSocket; using ActiveSocketPtr = std::unique_ptr; - friend class Quic::ActiveQuicListener; - friend class Quic::EnvoyQuicDispatcher; - /** * Wrapper for an active tcp listener owned by this handler. */ class ActiveTcpListener : public Network::ListenerCallbacks, public ActiveListenerImplBase { public: ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerConfig& config); - ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config); - ~ActiveTcpListener() override; // Network::ListenerCallbacks @@ -205,10 +206,8 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, NonCopyable { Event::TimerPtr timer_; }; - static ListenerStats generateStats(Stats::Scope& scope); - - spdlog::logger& logger_; Event::Dispatcher& dispatcher_; + const std::string per_handler_stat_prefix_; std::list> listeners_; @@ -225,9 +224,10 @@ class ActiveUdpListener : public Network::UdpListenerCallbacks, public Network::UdpListenerFilterManager, public Network::UdpReadFilterCallbacks { public: - ActiveUdpListener(Event::Dispatcher& dispatcher, Network::ListenerConfig& config); - - ActiveUdpListener(Network::ListenerPtr&& listener, Network::ListenerConfig& config); + 
ActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, + Network::ListenerConfig& config); + ActiveUdpListener(Network::ConnectionHandler& parent, Network::ListenerPtr&& listener, + Network::ListenerConfig& config); // Network::UdpListenerCallbacks void onData(Network::UdpRecvData& data) override; @@ -242,7 +242,7 @@ class ActiveUdpListener : public Network::UdpListenerCallbacks, Network::UdpListener& udpListener() override; private: - Network::UdpListener* udp_listener_; + Network::UdpListener& udp_listener_; Network::UdpListenerReadFilterPtr read_filter_; }; diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index 5b0b80163478..a856511f5654 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -19,9 +19,10 @@ namespace Server { GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuration::Main& config, Api::Api& api, std::unique_ptr&& test_interlock) - : test_interlock_hook_(std::move(test_interlock)), time_source_(api.timeSource()), - miss_timeout_(config.wdMissTimeout()), megamiss_timeout_(config.wdMegaMissTimeout()), - kill_timeout_(config.wdKillTimeout()), multi_kill_timeout_(config.wdMultiKillTimeout()), + : test_interlock_hook_(std::move(test_interlock)), stats_scope_(stats_scope), + time_source_(api.timeSource()), miss_timeout_(config.wdMissTimeout()), + megamiss_timeout_(config.wdMegaMissTimeout()), kill_timeout_(config.wdKillTimeout()), + multi_kill_timeout_(config.wdMultiKillTimeout()), loop_interval_([&]() -> std::chrono::milliseconds { // The loop interval is simply the minimum of all specified intervals, // but we must account for the 0=disabled case. 
This lambda takes care @@ -62,36 +63,38 @@ void GuardDogImpl::step() { bool seen_one_multi_timeout(false); Thread::LockGuard guard(wd_lock_); for (auto& watched_dog : watched_dogs_) { - const auto ltt = watched_dog.dog_->lastTouchTime(); + const auto ltt = watched_dog->dog_->lastTouchTime(); const auto delta = now - ltt; - if (watched_dog.last_alert_time_ && watched_dog.last_alert_time_.value() < ltt) { - watched_dog.miss_alerted_ = false; - watched_dog.megamiss_alerted_ = false; + if (watched_dog->last_alert_time_ && watched_dog->last_alert_time_.value() < ltt) { + watched_dog->miss_alerted_ = false; + watched_dog->megamiss_alerted_ = false; } if (delta > miss_timeout_) { - if (!watched_dog.miss_alerted_) { + if (!watched_dog->miss_alerted_) { watchdog_miss_counter_.inc(); - watched_dog.last_alert_time_ = ltt; - watched_dog.miss_alerted_ = true; + watched_dog->miss_counter_.inc(); + watched_dog->last_alert_time_ = ltt; + watched_dog->miss_alerted_ = true; } } if (delta > megamiss_timeout_) { - if (!watched_dog.megamiss_alerted_) { + if (!watched_dog->megamiss_alerted_) { watchdog_megamiss_counter_.inc(); - watched_dog.last_alert_time_ = ltt; - watched_dog.megamiss_alerted_ = true; + watched_dog->megamiss_counter_.inc(); + watched_dog->last_alert_time_ = ltt; + watched_dog->megamiss_alerted_ = true; } } if (killEnabled() && delta > kill_timeout_) { PANIC(fmt::format("GuardDog: one thread ({}) stuck for more than watchdog_kill_timeout", - watched_dog.dog_->threadId().debugString())); + watched_dog->dog_->threadId().debugString())); } if (multikillEnabled() && delta > multi_kill_timeout_) { if (seen_one_multi_timeout) { PANIC(fmt::format( "GuardDog: multiple threads ({},...) 
stuck for more than watchdog_multikill_timeout", - watched_dog.dog_->threadId().debugString())); + watched_dog->dog_->threadId().debugString())); } else { seen_one_multi_timeout = true; } @@ -108,19 +111,19 @@ void GuardDogImpl::step() { } } -WatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadId thread_id) { +WatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadId thread_id, + const std::string& thread_name) { // Timer started by WatchDog will try to fire at 1/2 of the interval of the // minimum timeout specified. loop_interval_ is const so all shared state // accessed out of the locked section below is const (time_source_ has no // state). - auto wd_interval = loop_interval_ / 2; + const auto wd_interval = loop_interval_ / 2; WatchDogSharedPtr new_watchdog = std::make_shared(std::move(thread_id), time_source_, wd_interval); - WatchedDog watched_dog; - watched_dog.dog_ = new_watchdog; + WatchedDogPtr watched_dog = std::make_unique(stats_scope_, thread_name, new_watchdog); { Thread::LockGuard guard(wd_lock_); - watched_dogs_.push_back(watched_dog); + watched_dogs_.push_back(std::move(watched_dog)); } new_watchdog->touch(); return new_watchdog; @@ -129,7 +132,7 @@ WatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadId thread_id) { void GuardDogImpl::stopWatching(WatchDogSharedPtr wd) { Thread::LockGuard guard(wd_lock_); auto found_wd = std::find_if(watched_dogs_.begin(), watched_dogs_.end(), - [&wd](const WatchedDog& d) -> bool { return d.dog_ == wd; }); + [&wd](const WatchedDogPtr& d) -> bool { return d->dog_ == wd; }); if (found_wd != watched_dogs_.end()) { watched_dogs_.erase(found_wd); } else { @@ -156,5 +159,17 @@ void GuardDogImpl::stop() { } } +GuardDogImpl::WatchedDog::WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name, + const WatchDogSharedPtr& watch_dog) + : dog_(watch_dog), + miss_counter_(stats_scope.counterFromStatName( + Stats::StatNameManagedStorage(fmt::format("server.{}.watchdog_miss", thread_name), + 
stats_scope.symbolTable()) + .statName())), + megamiss_counter_(stats_scope.counterFromStatName( + Stats::StatNameManagedStorage(fmt::format("server.{}.watchdog_mega_miss", thread_name), + stats_scope.symbolTable()) + .statName())) {} + } // namespace Server } // namespace Envoy diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h index 6d50a2e23fed..3d17829e9a20 100644 --- a/source/server/guarddog_impl.h +++ b/source/server/guarddog_impl.h @@ -87,7 +87,8 @@ class GuardDogImpl : public GuardDog { } // Server::GuardDog - WatchDogSharedPtr createWatchDog(Thread::ThreadId thread_id) override; + WatchDogSharedPtr createWatchDog(Thread::ThreadId thread_id, + const std::string& thread_name) override; void stopWatching(WatchDogSharedPtr wd) override; private: @@ -100,13 +101,20 @@ class GuardDogImpl : public GuardDog { bool multikillEnabled() const { return multi_kill_timeout_ > std::chrono::milliseconds(0); } struct WatchedDog { - WatchDogSharedPtr dog_; + WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name, + const WatchDogSharedPtr& watch_dog); + + const WatchDogSharedPtr dog_; absl::optional last_alert_time_; bool miss_alerted_{}; bool megamiss_alerted_{}; + Stats::Counter& miss_counter_; + Stats::Counter& megamiss_counter_; }; + using WatchedDogPtr = std::unique_ptr; std::unique_ptr test_interlock_hook_; + Stats::Scope& stats_scope_; TimeSource& time_source_; const std::chrono::milliseconds miss_timeout_; const std::chrono::milliseconds megamiss_timeout_; @@ -115,7 +123,7 @@ class GuardDogImpl : public GuardDog { const std::chrono::milliseconds loop_interval_; Stats::Counter& watchdog_miss_counter_; Stats::Counter& watchdog_megamiss_counter_; - std::vector watched_dogs_ ABSL_GUARDED_BY(wd_lock_); + std::vector watched_dogs_ ABSL_GUARDED_BY(wd_lock_); Thread::MutexBasicLockable wd_lock_; Thread::ThreadPtr thread_; Event::DispatcherPtr dispatcher_; diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 
957603de82f1..d0cce626afb1 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -106,8 +106,9 @@ void LdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrFieldisTransportConnectionless())) { + // If we got here, this is a tcp listener or connection-oriented udp listener, so ensure there + // is a filter chain specified throw EnvoyException(fmt::format("error adding listener '{}': no filter chains specified", address_->asString())); + } else if (udp_listener_factory_ != nullptr && + !udp_listener_factory_->isTransportConnectionless()) { + for (auto& filter_chain : config.filter_chains()) { + // Early fail if any filter chain doesn't have transport socket configured. + if (!filter_chain.has_transport_socket()) { + throw EnvoyException(fmt::format("error adding listener '{}': no transport socket " + "specified for connection oriented UDP listener", + address_->asString())); + } + } + } + + Server::Configuration::TransportSocketFactoryContextImpl factory_context( + parent_.server_.admin(), parent_.server_.sslContextManager(), *listener_scope_, + parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), + parent_.server_.random(), parent_.server_.stats(), parent_.server_.singletonManager(), + parent_.server_.threadLocal(), validation_visitor, parent_.server_.api()); + factory_context.setInitManager(initManager()); + ListenerFilterChainFactoryBuilder builder(*this, factory_context); + filter_chain_manager_.addFilterChain(config.filter_chains(), builder); + + if (socket_type_ == Network::Address::SocketType::Datagram) { + return; } + // TCP specific setup. 
if (config.has_tcp_fast_open_queue_length()) { addListenSocketOptions(Network::SocketOptionFactory::buildTcpFastOpenOptions( config.tcp_fast_open_queue_length().value())); @@ -292,14 +317,6 @@ ListenerImpl::ListenerImpl(const envoy::api::v2::Listener& config, const std::st factory.createFilterFactoryFromProto(Envoy::ProtobufWkt::Empty(), *this)); } - Server::Configuration::TransportSocketFactoryContextImpl factory_context( - parent_.server_.admin(), parent_.server_.sslContextManager(), *listener_scope_, - parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), - parent_.server_.random(), parent_.server_.stats(), parent_.server_.singletonManager(), - parent_.server_.threadLocal(), validation_visitor, parent_.server_.api()); - factory_context.setInitManager(initManager()); - ListenerFilterChainFactoryBuilder builder(*this, factory_context); - filter_chain_manager_.addFilterChain(config.filter_chains(), builder); const bool need_tls_inspector = std::any_of( config.filter_chains().begin(), config.filter_chains().end(), @@ -444,7 +461,8 @@ ListenerManagerImpl::ListenerManagerImpl(Instance& server, "listeners", [this] { return dumpListenerConfigs(); })), enable_dispatcher_stats_(enable_dispatcher_stats) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { - workers_.emplace_back(worker_factory.createWorker(server.overloadManager())); + workers_.emplace_back( + worker_factory.createWorker(server.overloadManager(), fmt::format("worker_{}", i))); } } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 35bd7ddfee28..599f471ea7fe 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -324,9 +324,7 @@ class ListenerImpl : public Network::ListenerConfig, ServerLifecycleNotifier& lifecycleNotifier() override { return parent_.server_.lifecycleNotifier(); } - absl::optional> processContext() override { - return parent_.server_.processContext(); 
- } + OptProcessContextRef processContext() override { return parent_.server_.processContext(); } // Network::DrainDecision bool drainClose() const override; diff --git a/source/server/server.cc b/source/server/server.cc index a92f83dd9620..c6282265df7a 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -61,10 +61,12 @@ InstanceImpl::InstanceImpl(const Options& options, Event::TimeSystem& time_syste !options.rejectUnknownDynamicFields()), time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)), original_start_time_(start_time_), stats_store_(store), thread_local_(tls), - api_(new Api::Impl(thread_factory, store, time_system, file_system)), + api_(new Api::Impl(thread_factory, store, time_system, file_system, + process_context ? OptProcessContextRef(std::ref(*process_context)) + : absl::nullopt)), dispatcher_(api_->allocateDispatcher()), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())), - handler_(new ConnectionHandlerImpl(ENVOY_LOGGER(), *dispatcher_)), + handler_(new ConnectionHandlerImpl(*dispatcher_, "main_thread")), random_generator_(std::move(random_generator)), listener_component_factory_(*this), worker_factory_(thread_local_, *api_, hooks), dns_resolver_(dispatcher_->createDnsResolver({})), @@ -530,7 +532,8 @@ void InstanceImpl::run() { // Run the main dispatch loop waiting to exit. 
ENVOY_LOG(info, "starting main dispatch loop"); - auto watchdog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto watchdog = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "main_thread"); watchdog->startWatchdog(*dispatcher_); dispatcher_->post([this] { notifyCallbacksForStage(Stage::Startup); }); dispatcher_->run(Event::Dispatcher::RunType::Block); diff --git a/source/server/server.h b/source/server/server.h index 006b39d99cbb..2ed83f1df2eb 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -194,9 +194,7 @@ class InstanceImpl : Logger::Loggable, Stats::Store& stats() override { return stats_store_; } Grpc::Context& grpcContext() override { return grpc_context_; } Http::Context& httpContext() override { return http_context_; } - absl::optional> processContext() override { - return *process_context_; - } + OptProcessContextRef processContext() override { return *process_context_; } ThreadLocal::Instance& threadLocal() override { return thread_local_; } const LocalInfo::LocalInfo& localInfo() override { return *local_info_; } TimeSource& timeSource() override { return time_source_; } diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index bf38d2181d85..4c829abf1e9b 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -13,19 +13,21 @@ namespace Envoy { namespace Server { -WorkerPtr ProdWorkerFactory::createWorker(OverloadManager& overload_manager) { +WorkerPtr ProdWorkerFactory::createWorker(OverloadManager& overload_manager, + const std::string& worker_name) { Event::DispatcherPtr dispatcher(api_.allocateDispatcher()); return WorkerPtr{new WorkerImpl( tls_, hooks_, std::move(dispatcher), - Network::ConnectionHandlerPtr{new ConnectionHandlerImpl(ENVOY_LOGGER(), *dispatcher)}, - overload_manager, api_)}; + Network::ConnectionHandlerPtr{new ConnectionHandlerImpl(*dispatcher, worker_name)}, + overload_manager, api_, worker_name)}; } 
WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler, - OverloadManager& overload_manager, Api::Api& api) + OverloadManager& overload_manager, Api::Api& api, + const std::string& worker_name) : tls_(tls), hooks_(hooks), dispatcher_(std::move(dispatcher)), handler_(std::move(handler)), - api_(api) { + api_(api), worker_name_(worker_name) { tls_.registerThread(*dispatcher_, false); overload_manager.registerForAction( OverloadActionNames::get().StopAcceptingConnections, *dispatcher_, @@ -99,18 +101,22 @@ void WorkerImpl::stopListeners() { void WorkerImpl::threadRoutine(GuardDog& guard_dog) { ENVOY_LOG(debug, "worker entering dispatch loop"); - auto watchdog = guard_dog.createWatchDog(api_.threadFactory().currentThreadId()); - watchdog->startWatchdog(*dispatcher_); + // The watch dog must be created after the dispatcher starts running and has post events flushed, + // as this is when TLS stat scopes start working. + dispatcher_->post([this, &guard_dog]() { + watch_dog_ = guard_dog.createWatchDog(api_.threadFactory().currentThreadId(), worker_name_); + watch_dog_->startWatchdog(*dispatcher_); + }); dispatcher_->run(Event::Dispatcher::RunType::Block); ENVOY_LOG(debug, "worker exited dispatch loop"); - guard_dog.stopWatching(watchdog); + guard_dog.stopWatching(watch_dog_); // We must close all active connections before we actually exit the thread. This prevents any // destructors from running on the main thread which might reference thread locals. Destroying // the handler does this which additionally purges the dispatcher delayed deletion list. 
handler_.reset(); tls_.shutdownThread(); - watchdog.reset(); + watch_dog_.reset(); } void WorkerImpl::stopAcceptingConnectionsCb(OverloadActionState state) { diff --git a/source/server/worker_impl.h b/source/server/worker_impl.h index 3e56578303ca..0f27d0f6f824 100644 --- a/source/server/worker_impl.h +++ b/source/server/worker_impl.h @@ -23,7 +23,8 @@ class ProdWorkerFactory : public WorkerFactory, Logger::Loggable { public: WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler, OverloadManager& overload_manager, - Api::Api& api); + Api::Api& api, const std::string& worker_name); // Server::Worker void addListener(Network::ListenerConfig& listener, AddListenerCompletion completion) override; @@ -60,6 +61,8 @@ class WorkerImpl : public Worker, Logger::Loggable { Network::ConnectionHandlerPtr handler_; Api::Api& api_; Thread::ThreadPtr thread_; + const std::string worker_name_; + WatchDogSharedPtr watch_dog_; }; } // namespace Server diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index e05f0911a129..73f2e50db962 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -202,6 +202,16 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_EQ("127.0.0.1:0", upstream_format.format(header, header, header, stream_info)); } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT"); + EXPECT_EQ("127.0.0.1", upstream_format.format(header, header, header, stream_info)); + } + + { + StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS"); + EXPECT_EQ("127.0.0.1:0", upstream_format.format(header, header, header, stream_info)); + } + { StreamInfoFormatter upstream_format("REQUESTED_SERVER_NAME"); std::string requested_server_name = "stub_server"; diff --git 
a/test/common/access_log/access_log_manager_impl_test.cc b/test/common/access_log/access_log_manager_impl_test.cc index 3b28ee6526a2..d013cddfc305 100644 --- a/test/common/access_log/access_log_manager_impl_test.cc +++ b/test/common/access_log/access_log_manager_impl_test.cc @@ -306,6 +306,55 @@ TEST_F(AccessLogManagerImplTest, reopenFile) { } } +// Test that the flush timer will trigger file reopen even if no data is waiting. +TEST_F(AccessLogManagerImplTest, reopenFileOnTimerOnly) { + NiceMock* timer = new NiceMock(&dispatcher_); + + Sequence sq; + EXPECT_CALL(*file_, open_(_)) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + AccessLogFileSharedPtr log_file = access_log_manager_.createAccessLog("foo"); + + EXPECT_CALL(*file_, write_(_)) + .InSequence(sq) + .WillOnce(Invoke([](absl::string_view data) -> Api::IoCallSizeResult { + EXPECT_EQ(0, data.compare("before")); + return Filesystem::resultSuccess(static_cast(data.length())); + })); + + log_file->write("before"); + timer->invokeCallback(); + + { + Thread::LockGuard lock(file_->write_mutex_); + while (file_->num_writes_ != 1) { + file_->write_event_.wait(file_->write_mutex_); + } + } + + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + EXPECT_CALL(*file_, open_(_)) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + EXPECT_CALL(*file_, close_()) + .InSequence(sq) + .WillOnce(Return(ByMove(Filesystem::resultSuccess(true)))); + + log_file->reopen(); + timer->invokeCallback(); + + { + Thread::LockGuard lock(file_->open_mutex_); + while (file_->num_opens_ != 2) { + file_->open_event_.wait(file_->open_mutex_); + } + } +} + TEST_F(AccessLogManagerImplTest, reopenThrows) { NiceMock* timer = new NiceMock(&dispatcher_); diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 1cf63847da3f..96b4f3fbdbb3 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ 
b/test/common/buffer/buffer_fuzz.cc @@ -10,6 +10,7 @@ #include "common/memory/stats.h" #include "common/network/io_socket_handle_impl.h" +#include "absl/strings/match.h" #include "gtest/gtest.h" // Strong assertion that applies across all compilation modes and doesn't rely @@ -127,6 +128,8 @@ class StringBuffer : public Buffer::Instance { return data_.find(std::string(static_cast(data), size), start); } + bool startsWith(absl::string_view data) const override { return absl::StartsWith(data_, data); } + std::string toString() const override { return data_; } Api::IoCallUint64Result write(Network::IoHandle& io_handle) override { diff --git a/test/common/buffer/buffer_speed_test.cc b/test/common/buffer/buffer_speed_test.cc index e74203fc802e..0d9937e4cd62 100644 --- a/test/common/buffer/buffer_speed_test.cc +++ b/test/common/buffer/buffer_speed_test.cc @@ -317,6 +317,54 @@ static void BufferSearchPartialMatch(benchmark::State& state) { } BENCHMARK(BufferSearchPartialMatch)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); +// Test buffer startsWith, for the simple case where there is no match for the pattern at the start +// of the buffer. +static void BufferStartsWith(benchmark::State& state) { + const std::string Pattern(16, 'b'); + std::string data; + data.reserve(state.range(0) + Pattern.length()); + data += std::string(state.range(0), 'a'); + data += Pattern; + + const absl::string_view input(data); + Buffer::OwnedImpl buffer(input); + ssize_t result = 0; + for (auto _ : state) { + if (!buffer.startsWith({Pattern.c_str(), Pattern.length()})) { + result++; + } + } + benchmark::DoNotOptimize(result); +} +BENCHMARK(BufferStartsWith)->Arg(1)->Arg(4096)->Arg(16384)->Arg(65536); + +// Test buffer startsWith, when there is a match at the start of the buffer. 
+static void BufferStartsWithMatch(benchmark::State& state) { + const std::string Prefix(state.range(1), 'b'); + const std::string Suffix("babbabbbabbbbabbbbbabbbbbbabbbbbbbabbbbbbbba"); + std::string data = Prefix; + size_t num_suffixes = 1 + state.range(0) / Prefix.length(); + data.reserve(Suffix.length() * num_suffixes + Prefix.length()); + for (size_t i = 0; i < num_suffixes; i++) { + data += Suffix; + } + + const absl::string_view input(data); + Buffer::OwnedImpl buffer(input); + ssize_t result = 0; + for (auto _ : state) { + if (buffer.startsWith({Prefix.c_str(), Prefix.length()})) { + result++; + } + } + benchmark::DoNotOptimize(result); +} +BENCHMARK(BufferStartsWithMatch) + ->Args({1, 1}) + ->Args({4096, 16}) + ->Args({16384, 256}) + ->Args({65536, 4096}); + } // namespace Envoy // Boilerplate main(), which discovers benchmarks in the same file and runs them. diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 282ae1961cf5..19e6daa06fb6 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -482,6 +482,29 @@ TEST_P(OwnedImplTest, Search) { EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0)); } +TEST_P(OwnedImplTest, StartsWith) { + // Populate a buffer with a string split across many small slices, to + // exercise edge cases in the startsWith implementation. 
+ static const char* Inputs[] = {"ab", "a", "", "aaa", "b", "a", "aaa", "ab", "a"}; + Buffer::OwnedImpl buffer; + verifyImplementation(buffer); + for (const auto& input : Inputs) { + buffer.appendSliceForTest(input); + } + EXPECT_STREQ("abaaaabaaaaaba", buffer.toString().c_str()); + + EXPECT_FALSE(buffer.startsWith({"abaaaabaaaaabaXXX", 17})); + EXPECT_FALSE(buffer.startsWith({"c", 1})); + EXPECT_TRUE(buffer.startsWith({"", 0})); + EXPECT_TRUE(buffer.startsWith({"a", 1})); + EXPECT_TRUE(buffer.startsWith({"ab", 2})); + EXPECT_TRUE(buffer.startsWith({"aba", 3})); + EXPECT_TRUE(buffer.startsWith({"abaa", 4})); + EXPECT_TRUE(buffer.startsWith({"abaaaab", 7})); + EXPECT_TRUE(buffer.startsWith({"abaaaabaaaaaba", 14})); + EXPECT_FALSE(buffer.startsWith({"ba", 2})); +} + TEST_P(OwnedImplTest, ToString) { Buffer::OwnedImpl buffer; verifyImplementation(buffer); diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 44ae41e1b317..6d6f043c310d 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -253,6 +253,14 @@ TEST_P(WatermarkBufferTest, Search) { EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5)); } +TEST_P(WatermarkBufferTest, StartsWith) { + buffer_.add(TEN_BYTES, 10); + + EXPECT_TRUE(buffer_.startsWith({TEN_BYTES, 2})); + EXPECT_TRUE(buffer_.startsWith({TEN_BYTES, 10})); + EXPECT_FALSE(buffer_.startsWith({&TEN_BYTES[1], 2})); +} + TEST_P(WatermarkBufferTest, MoveBackWithWatermarks) { int high_watermark_buffer1 = 0; int low_watermark_buffer1 = 0; diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 83975471c114..462a91e2184d 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -374,6 +374,14 @@ TEST_F(DeltaSubscriptionStateTest, AddedAndRemoved) { ack.error_detail_.message()); } +TEST_F(DeltaSubscriptionStateTest, 
handleEstablishmentFailure) { + EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)).Times(0); + + state_.handleEstablishmentFailure(); + EXPECT_EQ(stats_.update_failure_.value(), 1); + EXPECT_EQ(stats_.update_attempt_.value(), 1); +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index 7a1ca435985d..e79995b28f39 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -15,8 +15,10 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { InSequence s; EXPECT_CALL(*async_client_, startRaw(_, _, _)).WillOnce(Return(nullptr)); + // onConfigUpdateFailed() should not be called for gRPC stream connection failure EXPECT_CALL(callbacks_, - onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) + .Times(0); EXPECT_CALL(random_, random()); EXPECT_CALL(*timer_, enableTimer(_, _)); subscription_->start({"cluster0", "cluster1"}); @@ -38,8 +40,10 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { startSubscription({"cluster0", "cluster1"}); EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0)); + // onConfigUpdateFailed() should not be called for gRPC stream connection failure EXPECT_CALL(callbacks_, - onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) + .Times(0); EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(random_, random()); subscription_->grpcMux().grpcStreamForTest().onRemoteClose(Grpc::Status::GrpcStatus::Canceled, diff --git a/test/common/config/http_subscription_impl_test.cc b/test/common/config/http_subscription_impl_test.cc index afd592c0220e..87265898905e 100644 --- 
a/test/common/config/http_subscription_impl_test.cc +++ b/test/common/config/http_subscription_impl_test.cc @@ -16,7 +16,8 @@ TEST_F(HttpSubscriptionImplTest, OnRequestReset) { EXPECT_CALL(random_gen_, random()).WillOnce(Return(0)); EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(callbacks_, - onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) + .Times(0); http_callbacks_->onFailure(Http::AsyncClient::FailureReason::Reset); EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0)); timerTick(); @@ -34,14 +35,14 @@ TEST_F(HttpSubscriptionImplTest, BadJsonRecovery) { EXPECT_CALL(random_gen_, random()).WillOnce(Return(0)); EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(callbacks_, - onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); http_callbacks_->onSuccess(std::move(message)); - EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0)); request_in_progress_ = false; timerTick(); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0)); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, 7148434200721666028)); } TEST_F(HttpSubscriptionImplTest, ConfigNotModified) { diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index c5cd9c4c771a..403ad4d5f91f 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -302,7 +302,8 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { })); EXPECT_CALL(random_, random()); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); - EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)); + // 
onConfigUpdateFailed() should not be called for gRPC stream connection failure + EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)).Times(0); subscriptionFromConfigSource(config)->start({"static_cluster"}); } diff --git a/test/common/config/subscription_impl_test.cc b/test/common/config/subscription_impl_test.cc index 622a268c90cb..c45f19bd00e8 100644 --- a/test/common/config/subscription_impl_test.cc +++ b/test/common/config/subscription_impl_test.cc @@ -149,7 +149,11 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, InitialFetchTimeout) { expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", "cluster1"}); statsAre(1, 0, 0, 0, 0, 0); + if (GetParam() == SubscriptionType::Http) { + expectDisableInitFetchTimeoutTimer(); + } expectConfigUpdateFailed(); + callInitFetchTimeoutCb(); statsAre(1, 0, 0, 0, 1, 0); } diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 4e432fb57625..f3215f14dd1d 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -35,19 +35,3 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) - -envoy_cc_test( - name = "dispatched_thread_impl_test", - srcs = ["dispatched_thread_impl_test.cc"], - deps = [ - "//source/common/api:api_lib", - "//source/common/common:utility_lib", - "//source/common/event:dispatched_thread_lib", - "//source/server:guarddog_lib", - "//test/mocks:common_lib", - "//test/mocks/server:server_mocks", - "//test/mocks/stats:stats_mocks", - "//test/test_common:test_time_lib", - "//test/test_common:utility_lib", - ], -) diff --git a/test/common/event/dispatched_thread_impl_test.cc b/test/common/event/dispatched_thread_impl_test.cc deleted file mode 100644 index 704fbc354512..000000000000 --- a/test/common/event/dispatched_thread_impl_test.cc +++ /dev/null @@ -1,51 +0,0 @@ -#include - -#include "common/api/api_impl.h" -#include "common/common/utility.h" -#include "common/event/dispatched_thread.h" - -#include "server/guarddog_impl.h" - -#include 
"test/mocks/common.h" -#include "test/mocks/server/mocks.h" -#include "test/mocks/stats/mocks.h" -#include "test/test_common/utility.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::InSequence; -using testing::NiceMock; - -namespace Envoy { -namespace Event { -namespace { - -class DispatchedThreadTest : public testing::Test { -protected: - DispatchedThreadTest() - : config_(1000, 1000, 1000, 1000), api_(Api::createApiForTest(fakestats_)), thread_(*api_), - guard_dog_(fakestats_, config_, *api_) {} - - void SetUp() override { thread_.start(guard_dog_); } - - NiceMock config_; - Stats::IsolatedStoreImpl fakestats_; - Api::ApiPtr api_; - DispatchedThreadImpl thread_; - Envoy::Server::GuardDogImpl guard_dog_; -}; - -TEST_F(DispatchedThreadTest, PostCallbackTest) { - InSequence s; - ReadyWatcher watcher; - - EXPECT_CALL(watcher, ready()); - thread_.dispatcher().post([&watcher]() { watcher.ready(); }); - - thread_.exit(); -} - -} // namespace -} // namespace Event -} // namespace Envoy diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index b2ab23f919ad..68128c7faf71 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -78,6 +78,28 @@ TEST(GrpcContextTest, GetGrpcTimeout) { // so we don't test for them. 
} +TEST(GrpcCommonTest, GrpcStatusDetailsBin) { + Http::TestHeaderMapImpl empty_trailers; + EXPECT_FALSE(Common::getGrpcStatusDetailsBin(empty_trailers)); + + Http::TestHeaderMapImpl invalid_value{{"grpc-status-details-bin", "invalid"}}; + EXPECT_FALSE(Common::getGrpcStatusDetailsBin(invalid_value)); + + Http::TestHeaderMapImpl unpadded_value{ + {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA"}}; + auto status = Common::getGrpcStatusDetailsBin(unpadded_value); + ASSERT_TRUE(status); + EXPECT_EQ(Status::GrpcStatus::NotFound, status->code()); + EXPECT_EQ("Resource not found", status->message()); + + Http::TestHeaderMapImpl padded_value{ + {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA=="}}; + status = Common::getGrpcStatusDetailsBin(padded_value); + ASSERT_TRUE(status); + EXPECT_EQ(Status::GrpcStatus::NotFound, status->code()); + EXPECT_EQ("Resource not found", status->message()); +} + TEST(GrpcContextTest, ToGrpcTimeout) { Http::HeaderString value; diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 2e591a77cf92..ebe2bd70a56c 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -188,6 +188,13 @@ TEST_F(ConnectionManagerUtilityTest, DetermineNextProtocol) { Buffer::OwnedImpl data("PRI * HTTP/"); EXPECT_EQ("", ConnectionManagerUtility::determineNextProtocol(connection, data)); } + + { + Network::MockConnection connection; + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("")); + Buffer::OwnedImpl data(" PRI * HTTP/2"); + EXPECT_EQ("", ConnectionManagerUtility::determineNextProtocol(connection, data)); + } } // Verify external request and XFF is set when we are using remote address and the address is diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 945bec99d5f1..117b2cbc15c1 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc 
@@ -4,6 +4,7 @@ #include "envoy/config/bootstrap/v2/bootstrap.pb.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" +#include "common/common/base64.h" #include "common/protobuf/message_validator_impl.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -118,6 +119,26 @@ TEST_F(ProtobufUtilityTest, evaluateFractionalPercent) { } // namespace ProtobufPercentHelper +TEST_F(ProtobufUtilityTest, MessageUtilHash) { + ProtobufWkt::Struct s; + (*s.mutable_fields())["ab"].set_string_value("fgh"); + (*s.mutable_fields())["cde"].set_string_value("ij"); + + ProtobufWkt::Any a1; + a1.PackFrom(s); + // The two base64 encoded Struct to test map is identical to the struct above, this tests whether + // a map is deterministically serialized and hashed. + ProtobufWkt::Any a2 = a1; + a2.set_value(Base64::decode("CgsKA2NkZRIEGgJpagoLCgJhYhIFGgNmZ2g=")); + ProtobufWkt::Any a3 = a1; + a3.set_value(Base64::decode("CgsKAmFiEgUaA2ZnaAoLCgNjZGUSBBoCaWo=")); + + EXPECT_EQ(MessageUtil::hash(a1), MessageUtil::hash(a2)); + EXPECT_EQ(MessageUtil::hash(a2), MessageUtil::hash(a3)); + EXPECT_NE(0, MessageUtil::hash(a1)); + EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1)); +} + TEST_F(ProtobufUtilityTest, RepeatedPtrUtilDebugString) { Protobuf::RepeatedPtrField repeated; EXPECT_EQ("[]", RepeatedPtrUtil::debugString(repeated)); @@ -658,6 +679,48 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RepeatedMessageDeprecated)) checkForDeprecation(base)); } +// Check that deprecated enum values trigger for default values +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecatedDefault)) { + envoy::test::deprecation_test::Base base; + base.mutable_enum_container(); + + EXPECT_LOG_CONTAINS( + "warning", + "Using the default now-deprecated value DEPRECATED_DEFAULT for enum " + "'envoy.test.deprecation_test.Base.InnerMessageWithDeprecationEnum.deprecated_enum' from " + "file deprecated.proto. 
This enum value will be removed from Envoy soon so a non-default " + "value must now be explicitly set.", + checkForDeprecation(base)); +} + +// Check that deprecated enum values trigger for non-default values +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecated)) { + envoy::test::deprecation_test::Base base; + base.mutable_enum_container()->set_deprecated_enum( + envoy::test::deprecation_test::Base::DEPRECATED_NOT_DEFAULT); + + EXPECT_LOG_CONTAINS( + "warning", + "Using deprecated value DEPRECATED_NOT_DEFAULT for enum " + "'envoy.test.deprecation_test.Base.InnerMessageWithDeprecationEnum.deprecated_enum' " + "from file deprecated.proto. This enum value will be removed from Envoy soon.", + checkForDeprecation(base)); +} + +// Make sure the runtime overrides for protos work, by checking the non-fatal to +// fatal option. +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault)) { + envoy::test::deprecation_test::Base base; + base.mutable_enum_container(); + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.deprecated_features.deprecated.proto:DEPRECATED_DEFAULT", "false"}}); + + // Make sure this is set up right. 
+ EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), ProtoValidationException, + "Using the default now-deprecated value DEPRECATED_DEFAULT"); +} + class TimestampUtilTest : public testing::Test, public ::testing::WithParamInterface {}; TEST_P(TimestampUtilTest, SystemClockToTimestampTest) { diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index b581017f3f92..11424515eb32 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -253,8 +253,8 @@ TEST_F(RdsImplTest, FailureSubscription) { setup(); EXPECT_CALL(init_watcher_, ready()); - rds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, - {}); + // onConfigUpdateFailed() should not be called for gRPC stream connection failure + rds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {}); } class RouteConfigProviderManagerImplTest : public RdsTestBase { diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 11d029309411..ef5428e19ea9 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -138,6 +138,22 @@ class DiskLoaderImplTest : public LoaderImplTest { ProtobufWkt::Struct base_; }; +TEST_F(DiskLoaderImplTest, DoubleUintInteraction) { + setup(); + run("test/common/runtime/test_data/current", "envoy_override"); + + EXPECT_EQ(2UL, loader_->snapshot().getInteger("file3", 1)); + EXPECT_EQ(2.0, loader_->snapshot().getDouble("file3", 1.1)); +} + +TEST_F(DiskLoaderImplTest, DoubleUintInteractionNegatives) { + setup(); + run("test/common/runtime/test_data/current", "envoy_override"); + + EXPECT_EQ(1, loader_->snapshot().getInteger("file_with_negative_double", 1)); + EXPECT_EQ(-4.2, loader_->snapshot().getDouble("file_with_negative_double", 1.1)); +} + TEST_F(DiskLoaderImplTest, All) { setup(); run("test/common/runtime/test_data/current", "envoy_override"); @@ -152,6 
+168,14 @@ TEST_F(DiskLoaderImplTest, All) { EXPECT_EQ(2UL, loader_->snapshot().getInteger("file3", 1)); EXPECT_EQ(123UL, loader_->snapshot().getInteger("file4", 1)); + // Double getting. + // Bogus string, expect default. + EXPECT_EQ(42.1, loader_->snapshot().getDouble("file_with_words", 42.1)); + // Valid float string. + EXPECT_EQ(23.2, loader_->snapshot().getDouble("file_with_double", 1.1)); + // Valid float string followed by newlines. + EXPECT_EQ(3.141, loader_->snapshot().getDouble("file_with_double_newlines", 1.1)); + bool value; const SnapshotImpl* snapshot = reinterpret_cast(&loader_->snapshot()); @@ -192,6 +216,7 @@ TEST_F(DiskLoaderImplTest, All) { // Files with comments. EXPECT_EQ(123UL, loader_->snapshot().getInteger("file5", 1)); + EXPECT_EQ(2.718, loader_->snapshot().getDouble("file_with_double_comment", 1.1)); EXPECT_EQ("/home#about-us", loader_->snapshot().get("file6")); EXPECT_EQ("", loader_->snapshot().get("file7")); @@ -247,10 +272,17 @@ TEST_F(DiskLoaderImplTest, All) { EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); - EXPECT_EQ(17, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); + EXPECT_EQ(23, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); EXPECT_EQ(4, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } +TEST_F(DiskLoaderImplTest, UintLargeIntegerConversion) { + setup(); + run("test/common/runtime/test_data/current", "envoy_override"); + + EXPECT_EQ(1, loader_->snapshot().getInteger("file_with_large_integer", 1)); +} + TEST_F(DiskLoaderImplTest, GetLayers) { base_ = TestUtility::parseYaml(R"EOF( foo: whatevs @@ -355,25 +387,35 @@ void testNewOverrides(Loader& loader, Stats::Store& store) { Stats::Gauge& admin_overrides_active = store.gauge("runtime.admin_overrides_active", Stats::Gauge::ImportMode::NeverImport); - // New string + // New string. 
loader.mergeValues({{"foo", "bar"}}); EXPECT_EQ("bar", loader.snapshot().get("foo")); EXPECT_EQ(1, admin_overrides_active.value()); - // Remove new string + // Remove new string. loader.mergeValues({{"foo", ""}}); EXPECT_EQ("", loader.snapshot().get("foo")); EXPECT_EQ(0, admin_overrides_active.value()); - // New integer + // New integer. loader.mergeValues({{"baz", "42"}}); EXPECT_EQ(42, loader.snapshot().getInteger("baz", 0)); EXPECT_EQ(1, admin_overrides_active.value()); - // Remove new integer + // Remove new integer. loader.mergeValues({{"baz", ""}}); EXPECT_EQ(0, loader.snapshot().getInteger("baz", 0)); EXPECT_EQ(0, admin_overrides_active.value()); + + // New double. + loader.mergeValues({{"beep", "42.1"}}); + EXPECT_EQ(42.1, loader.snapshot().getDouble("beep", 1.2)); + EXPECT_EQ(1, admin_overrides_active.value()); + + // Remove new double. + loader.mergeValues({{"beep", ""}}); + EXPECT_EQ(1.2, loader.snapshot().getDouble("beep", 1.2)); + EXPECT_EQ(0, admin_overrides_active.value()); } TEST_F(DiskLoaderImplTest, MergeValues) { @@ -403,6 +445,16 @@ TEST_F(DiskLoaderImplTest, MergeValues) { EXPECT_EQ(2, loader_->snapshot().getInteger("file3", 1)); EXPECT_EQ(0, admin_overrides_active.value()); + // Override double + loader_->mergeValues({{"file_with_double", "42.1"}}); + EXPECT_EQ(42.1, loader_->snapshot().getDouble("file_with_double", 1.1)); + EXPECT_EQ(1, admin_overrides_active.value()); + + // Remove overridden double + loader_->mergeValues({{"file_with_double", ""}}); + EXPECT_EQ(23.2, loader_->snapshot().getDouble("file_with_double", 1.1)); + EXPECT_EQ(0, admin_overrides_active.value()); + // Override override string loader_->mergeValues({{"file1", "hello overridden override"}}); EXPECT_EQ("hello overridden override", loader_->snapshot().get("file1")); @@ -415,7 +467,7 @@ TEST_F(DiskLoaderImplTest, MergeValues) { EXPECT_EQ(0, store_.gauge("runtime.admin_overrides_active", Stats::Gauge::ImportMode::NeverImport) .value()); - EXPECT_EQ(11, 
store_.counter("runtime.load_success").value()); + EXPECT_EQ(15, store_.counter("runtime.load_success").value()); EXPECT_EQ(4, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } @@ -498,6 +550,7 @@ TEST_F(StaticLoaderImplTest, All) { setup(); EXPECT_EQ("", loader_->snapshot().get("foo")); EXPECT_EQ(1UL, loader_->snapshot().getInteger("foo", 1)); + EXPECT_EQ(1.1, loader_->snapshot().getDouble("foo", 1.1)); EXPECT_CALL(generator_, random()).WillOnce(Return(49)); EXPECT_TRUE(loader_->snapshot().featureEnabled("foo", 50)); testNewOverrides(*loader_, store_); @@ -530,6 +583,8 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { numerator: 100 foo: bar empty: {} + file_with_words: "some words" + file_with_double: 23.2 )EOF"); setup(); @@ -543,6 +598,10 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { EXPECT_EQ(2UL, loader_->snapshot().getInteger("file3", 1)); EXPECT_EQ(123UL, loader_->snapshot().getInteger("file4", 1)); + // Double getting. + EXPECT_EQ(1.1, loader_->snapshot().getDouble("file_with_words", 1.1)); + EXPECT_EQ(23.2, loader_->snapshot().getDouble("file_with_double", 1.1)); + // Boolean getting. 
bool value; const SnapshotImpl* snapshot = reinterpret_cast(&loader_->snapshot()); @@ -613,7 +672,7 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); - EXPECT_EQ(15, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); + EXPECT_EQ(17, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); EXPECT_EQ(2, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } @@ -845,8 +904,9 @@ TEST_F(RtdsLoaderImplTest, FailureSubscription) { setup(); EXPECT_CALL(init_watcher_, ready()); - rtds_callbacks_[0]->onConfigUpdateFailed( - Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, {}); + // onConfigUpdateFailed() should not be called for gRPC stream connection failure + rtds_callbacks_[0]->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, + {}); EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); diff --git a/test/common/runtime/test_data/root/envoy/file_with_double b/test/common/runtime/test_data/root/envoy/file_with_double new file mode 100644 index 000000000000..3c8ce91a4696 --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file_with_double @@ -0,0 +1 @@ +23.2 diff --git a/test/common/runtime/test_data/root/envoy/file_with_double_comment b/test/common/runtime/test_data/root/envoy/file_with_double_comment new file mode 100644 index 000000000000..3ea19b108ca2 --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file_with_double_comment @@ -0,0 +1,2 @@ +# Here's a comment! 
+2.718 diff --git a/test/common/runtime/test_data/root/envoy/file_with_double_newlines b/test/common/runtime/test_data/root/envoy/file_with_double_newlines new file mode 100644 index 000000000000..8699ce764422 --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file_with_double_newlines @@ -0,0 +1,4 @@ +3.141 + + + diff --git a/test/common/runtime/test_data/root/envoy/file_with_large_integer b/test/common/runtime/test_data/root/envoy/file_with_large_integer new file mode 100644 index 000000000000..00e88310041a --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file_with_large_integer @@ -0,0 +1,2 @@ +# 2^64 * 10 +184467440737095516160 diff --git a/test/common/runtime/test_data/root/envoy/file_with_negative_double b/test/common/runtime/test_data/root/envoy/file_with_negative_double new file mode 100644 index 000000000000..50c9d06aa52c --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file_with_negative_double @@ -0,0 +1 @@ +-4.2 diff --git a/test/common/runtime/test_data/root/envoy/file_with_words b/test/common/runtime/test_data/root/envoy/file_with_words new file mode 100644 index 000000000000..a8287c64e148 --- /dev/null +++ b/test/common/runtime/test_data/root/envoy/file_with_words @@ -0,0 +1 @@ +bogus string diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index 19712797f54a..a2e85abcef8f 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -24,6 +24,7 @@ envoy_cc_test( "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", + "@envoy_api//envoy/config/grpc_credential/v2alpha:file_based_metadata_cc", ], ) diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 69e3051ce880..33e208be9f64 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -3,7 +3,9 @@ #include "envoy/admin/v2alpha/config_dump.pb.h" #include 
"envoy/api/v2/auth/cert.pb.h" #include "envoy/common/exception.h" +#include "envoy/config/grpc_credential/v2alpha/file_based_metadata.pb.h" +#include "common/common/base64.h" #include "common/common/logger.h" #include "common/secret/sds_api.h" #include "common/secret/secret_manager_impl.h" @@ -165,6 +167,90 @@ name: "abc.com" "Secret type not implemented"); } +// Validate that secret manager deduplicates dynamic TLS certificate secret provider. +// Regression test of https://github.com/envoyproxy/envoy/issues/5744 +TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { + Server::MockInstance server; + std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + + NiceMock secret_context; + + NiceMock local_info; + NiceMock dispatcher; + NiceMock random; + Stats::IsolatedStoreImpl stats; + NiceMock init_manager; + NiceMock init_watcher; + Init::TargetHandlePtr init_target_handle; + EXPECT_CALL(init_manager, add(_)) + .WillRepeatedly(Invoke([&init_target_handle](const Init::Target& target) { + init_target_handle = target.createHandle("test"); + })); + EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + + envoy::api::v2::core::ConfigSource config_source; + TestUtility::loadFromYaml(R"( +api_config_source: + api_type: GRPC + grpc_services: + - google_grpc: + call_credentials: + - from_plugin: + name: envoy.grpc_credentials.file_based_metadata + typed_config: + "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig + stat_prefix: sdsstat + credentials_factory_name: envoy.grpc_credentials.file_based_metadata + )", + config_source); + config_source.mutable_api_config_source() + ->mutable_grpc_services(0) + ->mutable_google_grpc() + 
->mutable_call_credentials(0) + ->mutable_from_plugin() + ->mutable_typed_config() + ->set_value(Base64::decode("CjUKMy92YXIvcnVuL3NlY3JldHMva3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3Vud" + "C90b2tlbhILeC10b2tlbi1iaW4=")); + auto secret_provider1 = + secret_manager->findOrCreateTlsCertificateProvider(config_source, "abc.com", secret_context); + + // The base64 encoded proto binary is identical to the one above, but in different field order. + // It is also identical to the YAML below. + config_source.mutable_api_config_source() + ->mutable_grpc_services(0) + ->mutable_google_grpc() + ->mutable_call_credentials(0) + ->mutable_from_plugin() + ->mutable_typed_config() + ->set_value(Base64::decode("Egt4LXRva2VuLWJpbgo1CjMvdmFyL3J1bi9zZWNyZXRzL2t1YmVybmV0ZXMuaW8vc" + "2VydmljZWFjY291bnQvdG9rZW4=")); + auto secret_provider2 = + secret_manager->findOrCreateTlsCertificateProvider(config_source, "abc.com", secret_context); + + envoy::config::grpc_credential::v2alpha::FileBasedMetadataConfig file_based_metadata_config; + TestUtility::loadFromYaml(R"( +header_key: x-token-bin +secret_data: + filename: "/var/run/secrets/kubernetes.io/serviceaccount/token" + )", + file_based_metadata_config); + config_source.mutable_api_config_source() + ->mutable_grpc_services(0) + ->mutable_google_grpc() + ->mutable_call_credentials(0) + ->mutable_from_plugin() + ->mutable_typed_config() + ->PackFrom(file_based_metadata_config); + auto secret_provider3 = + secret_manager->findOrCreateTlsCertificateProvider(config_source, "abc.com", secret_context); + + EXPECT_EQ(secret_provider1, secret_provider2); + EXPECT_EQ(secret_provider2, secret_provider3); +} + TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { Server::MockInstance server; std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index 4917caeca936..bfd829bde3cd 100644 --- 
a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -543,26 +543,28 @@ TEST_P(StatNameTest, StatNameSet) { // Test that we get a consistent StatName object from a remembered name. set.rememberBuiltin("remembered"); - const Stats::StatName remembered = set.getStatName("remembered"); + const StatName fallback = set.add("fallback"); + const Stats::StatName remembered = set.getBuiltin("remembered", fallback); EXPECT_EQ("remembered", table_->toString(remembered)); - EXPECT_EQ(remembered.data(), set.getStatName("remembered").data()); + EXPECT_EQ(remembered.data(), set.getBuiltin("remembered", fallback).data()); + EXPECT_EQ(fallback.data(), set.getBuiltin("not_remembered", fallback).data()); // Same test for a dynamically allocated name. The only difference between // the behavior with a remembered vs dynamic name is that when looking // up a remembered name, a mutex is not taken. But we have no easy way // to test for that. So we'll at least cover the code. - const Stats::StatName dynamic = set.getStatName("dynamic"); + const Stats::StatName dynamic = set.getDynamic("dynamic"); EXPECT_EQ("dynamic", table_->toString(dynamic)); - EXPECT_EQ(dynamic.data(), set.getStatName("dynamic").data()); + EXPECT_EQ(dynamic.data(), set.getDynamic("dynamic").data()); // There's another corner case for the same "dynamic" name from a // different set. Here we will get a different StatName object // out of the second set, though it will share the same underlying // symbol-table symbol. 
StatNameSet set2(*table_); - const Stats::StatName dynamic2 = set2.getStatName("dynamic"); + const Stats::StatName dynamic2 = set2.getDynamic("dynamic"); EXPECT_EQ("dynamic", table_->toString(dynamic2)); - EXPECT_EQ(dynamic2.data(), set2.getStatName("dynamic").data()); + EXPECT_EQ(dynamic2.data(), set2.getDynamic("dynamic").data()); EXPECT_NE(dynamic2.data(), dynamic.data()); } diff --git a/test/common/thread_local/thread_local_impl_test.cc b/test/common/thread_local/thread_local_impl_test.cc index fb3bd1cf3962..f189e2a2481b 100644 --- a/test/common/thread_local/thread_local_impl_test.cc +++ b/test/common/thread_local/thread_local_impl_test.cc @@ -14,7 +14,6 @@ using testing::ReturnPointee; namespace Envoy { namespace ThreadLocal { -namespace { class TestThreadLocalObject : public ThreadLocalObject { public: @@ -46,8 +45,10 @@ class ThreadLocalInstanceImplTest : public testing::Test { object.reset(); return object_ref; } - + int deferredDeletesMapSize() { return tls_.deferred_deletes_.size(); } + int freeSlotIndexesListSize() { return tls_.free_slot_indexes_.size(); } InstanceImpl tls_; + Event::MockDispatcher main_dispatcher_; Event::MockDispatcher thread_dispatcher_; }; @@ -59,15 +60,20 @@ TEST_F(ThreadLocalInstanceImplTest, All) { EXPECT_CALL(thread_dispatcher_, post(_)); SlotPtr slot1 = tls_.allocateSlot(); slot1.reset(); + EXPECT_EQ(deferredDeletesMapSize(), 0); + EXPECT_EQ(freeSlotIndexesListSize(), 1); // Create a new slot which should take the place of the old slot. ReturnPointee() is used to // avoid "leaks" when using InSequence and shared_ptr. SlotPtr slot2 = tls_.allocateSlot(); TestThreadLocalObject& object_ref2 = setObject(*slot2); + EXPECT_EQ(freeSlotIndexesListSize(), 0); EXPECT_CALL(thread_dispatcher_, post(_)); EXPECT_CALL(object_ref2, onDestroy()); + EXPECT_EQ(freeSlotIndexesListSize(), 0); slot2.reset(); + EXPECT_EQ(freeSlotIndexesListSize(), 1); // Make two new slots, shutdown global threading, and delete them. 
We should not see any // cross-thread posts at this point. We should also see destruction in reverse order. @@ -79,12 +85,53 @@ TEST_F(ThreadLocalInstanceImplTest, All) { tls_.shutdownGlobalThreading(); slot3.reset(); slot4.reset(); + EXPECT_EQ(freeSlotIndexesListSize(), 0); + EXPECT_EQ(deferredDeletesMapSize(), 2); EXPECT_CALL(object_ref4, onDestroy()); EXPECT_CALL(object_ref3, onDestroy()); tls_.shutdownThread(); } +TEST_F(ThreadLocalInstanceImplTest, DeferredRecycle) { + InSequence s; + + // Free a slot without ever calling set. + EXPECT_CALL(thread_dispatcher_, post(_)); + SlotPtr slot1 = tls_.allocateSlot(); + slot1.reset(); + // Slot destructed directly, as there is no out-going callbacks. + EXPECT_EQ(deferredDeletesMapSize(), 0); + EXPECT_EQ(freeSlotIndexesListSize(), 1); + + // Allocate a slot and set value, hold the posted callback and the slot will only be returned + // after the held callback is destructed. + { + SlotPtr slot2 = tls_.allocateSlot(); + EXPECT_EQ(freeSlotIndexesListSize(), 0); + { + Event::PostCb holder; + EXPECT_CALL(thread_dispatcher_, post(_)).WillOnce(Invoke([&](Event::PostCb cb) { + // Holds the posted callback. + holder = cb; + })); + slot2->set( + [](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return nullptr; }); + slot2.reset(); + // Not released yet, as holder has a copy of the ref_count_. + EXPECT_EQ(freeSlotIndexesListSize(), 0); + EXPECT_EQ(deferredDeletesMapSize(), 1); + // This post is called when the holder dies. + EXPECT_CALL(thread_dispatcher_, post(_)); + } + // Slot is deleted now that there holder destructs. + EXPECT_EQ(deferredDeletesMapSize(), 0); + EXPECT_EQ(freeSlotIndexesListSize(), 1); + } + + tls_.shutdownGlobalThreading(); +} + // Test that the config passed into the update callback is the previous version stored in the slot. 
TEST_F(ThreadLocalInstanceImplTest, UpdateCallback) { InSequence s; @@ -179,6 +226,5 @@ TEST(ThreadLocalInstanceImplDispatcherTest, Dispatcher) { tls.shutdownThread(); } -} // namespace } // namespace ThreadLocal } // namespace Envoy diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index 48101c09578b..fe6b5e39ab60 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -352,8 +352,8 @@ TEST_F(CdsApiImplTest, FailureSubscription) { setup(); EXPECT_CALL(initialized_, ready()); - cds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, - {}); + // onConfigUpdateFailed() should not be called for gRPC stream connection failure + cds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {}); EXPECT_EQ("", cds_->versionInfo()); } diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 688c162807ff..9247c9d84eb8 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -207,14 +207,6 @@ TEST_F(EdsTest, ValidateFail) { EXPECT_FALSE(initialized_); } -// Validate onConfigUpdate() on stream disconnection. -TEST_F(EdsTest, StreamDisconnection) { - initialize(); - eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, - nullptr); - EXPECT_FALSE(initialized_); -} - // Validate that onConfigUpdate() with unexpected cluster names rejects config. 
TEST_F(EdsTest, OnConfigUpdateWrongName) { envoy::api::v2::ClusterLoadAssignment cluster_load_assignment; diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index cee4378e4a21..226533bae5c5 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -89,6 +89,10 @@ class HttpGrpcAccessLogTest : public testing::Test { socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -130,6 +134,10 @@ TEST_F(HttpGrpcAccessLogTest, Marshalling) { socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: pipe: path: "/foo" @@ -158,6 +166,10 @@ response: {} socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -215,6 +227,10 @@ response: {} socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -286,6 +302,10 @@ protocol_version: HTTP10 socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -334,6 +354,10 @@ response: {} socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -390,6 +414,10 @@ response: {} 
socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -436,6 +464,10 @@ response: {} socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -482,6 +514,10 @@ response: {} socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -528,6 +564,10 @@ response: {} socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" @@ -601,6 +641,10 @@ TEST_F(HttpGrpcAccessLogTest, MarshallingAdditionalHeaders) { socket_address: address: "127.0.0.1" port_value: 0 + downstream_direct_remote_address: + socket_address: + address: "127.0.0.1" + port_value: 0 downstream_local_address: socket_address: address: "127.0.0.2" diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index 6ee89b890b77..3436c6b62b42 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -79,6 +79,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, // Clear fields which are not deterministic. 
auto* log_entry = request_msg.mutable_http_logs()->mutable_log_entry(0); log_entry->mutable_common_properties()->clear_downstream_remote_address(); + log_entry->mutable_common_properties()->clear_downstream_direct_remote_address(); log_entry->mutable_common_properties()->clear_downstream_local_address(); log_entry->mutable_common_properties()->clear_start_time(); log_entry->mutable_common_properties()->clear_time_to_last_rx_byte(); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 6941c1b3624d..7ea6341fee7a 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -33,7 +33,8 @@ namespace { class ExtAuthzHttpClientTest : public testing::Test { public: ExtAuthzHttpClientTest() - : async_request_{&async_client_}, config_{createConfig()}, client_{cm_, config_} { + : async_request_{&async_client_}, config_{createConfig()}, + time_source_{async_client_.dispatcher().timeSource()}, client_{cm_, config_, time_source_} { ON_CALL(cm_, httpAsyncClientForCluster(config_->cluster())) .WillByDefault(ReturnRef(async_client_)); } @@ -111,8 +112,10 @@ class ExtAuthzHttpClientTest : public testing::Test { NiceMock async_client_; NiceMock async_request_; ClientConfigSharedPtr config_; + TimeSource& time_source_; RawHttpClientImpl client_; MockRequestCallbacks request_callbacks_; + Tracing::MockSpan active_span_; }; // Test HTTP client config default values. @@ -143,10 +146,11 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { // // Check other attributes. EXPECT_EQ(config_->pathPrefix(), "/bar"); EXPECT_EQ(config_->cluster(), "ext_authz"); + EXPECT_EQ(config_->tracingName(), "async ext_authz egress"); EXPECT_EQ(config_->timeout(), std::chrono::milliseconds{250}); } -// // Test default allowed headers in the HTTP client. +// Test default allowed headers in the HTTP client. 
TEST_F(ExtAuthzHttpClientTest, TestDefaultAllowedHeaders) { std::string yaml = R"EOF( http_service: @@ -219,20 +223,29 @@ TEST_F(ExtAuthzHttpClientTest, AllowedRequestHeadersPrefix) { // Verify client response when authorization server returns a 200 OK. TEST_F(ExtAuthzHttpClientTest, AuthorizationOk) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + + client_.check(request_callbacks_, request, active_span_); + EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); + EXPECT_CALL(*child_span, finishSpan()); client_.onSuccess(std::move(check_response)); } // Verify client response headers when authorization_headers_to_add is configured. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); @@ -241,19 +254,26 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeaders) { request.mutable_attributes()->mutable_request()->mutable_http()->mutable_headers(); (*mutable_headers)[std::string{":x-authz-header2"}] = std::string{"forged-value"}; + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); // Expect that header1 will be added and header2 correctly overwritten. EXPECT_CALL(async_client_, send_(AllOf(ContainsPairAsHeader(config_->headersToAdd().front()), ContainsPairAsHeader(config_->headersToAdd().back())), _, _)); - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); + EXPECT_CALL(*child_span, finishSpan()); client_.onSuccess(std::move(check_response)); } // Verify client response headers when allow_upstream_headers is configured. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const std::string empty_body{}; const auto expected_headers = TestCommon::makeHeaderValueOption({{"x-baz", "foo", false}, {"bar", "foo", false}}); @@ -261,9 +281,12 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { TestCommon::makeAuthzResponse(CheckStatus::OK, Http::Code::OK, empty_body, expected_headers); envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + client_.check(request_callbacks_, request, active_span_); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}, @@ -273,55 +296,80 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { {"bar", "foo", false}, {"x-baz", "foo", false}, {"foobar", "foo", false}}); + + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); + EXPECT_CALL(*child_span, finishSpan()); auto message_response = TestCommon::makeMessageResponse(check_response_headers); client_.onSuccess(std::move(message_response)); } // Test the client when a denied response is received. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "403", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Forbidden, "", expected_headers); envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); - + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + client_.check(request_callbacks_, request, active_span_); + + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Forbidden"))); + EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - client_.onSuccess(TestCommon::makeMessageResponse(expected_headers)); } // Verify client response headers and body when the authorization server denies the request. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_body = std::string{"test"}; const auto expected_headers = TestCommon::makeHeaderValueOption( {{":status", "401", false}, {"foo", "bar", false}, {"x-foobar", "bar", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Unauthorized, expected_body, expected_headers); + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_.check(request_callbacks_, request, active_span_); + + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); + EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - client_.onSuccess(TestCommon::makeMessageResponse(expected_headers, expected_body)); } // Verify client response headers when the authorization server denies the request and // allowed_client_headers is configured. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_body = std::string{"test"}; const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Unauthorized, expected_body, TestCommon::makeHeaderValueOption( {{"x-foo", "bar", false}, {":status", "401", false}, {"foo", "bar", false}})); + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); + EXPECT_CALL(*child_span, finishSpan()); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":method", "post", false}, {"x-foo", "bar", false}, {":status", "401", false}, @@ -331,11 +379,19 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { // Test the client when an unknown error occurs. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); + EXPECT_CALL(*child_span, finishSpan()); client_.onFailure(Http::AsyncClient::FailureReason::Reset); } @@ -343,12 +399,18 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) { Http::MessagePtr check_response(new Http::ResponseMessageImpl( Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "503"}}})); + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - + EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Service Unavailable"))); + EXPECT_CALL(*child_span, finishSpan()); client_.onSuccess(std::move(check_response)); } @@ -357,35 +419,55 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) { TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestErrorParsingStatusCode) { 
Http::MessagePtr check_response(new Http::ResponseMessageImpl( Http::HeaderMapPtr{new Http::TestHeaderMapImpl{{":status", "foo"}}})); + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v2::CheckRequest request; - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); + + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); + EXPECT_CALL(*child_span, finishSpan()); client_.onSuccess(std::move(check_response)); } // Test the client when the request is canceled. TEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) { + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v2::CheckRequest request; + + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); - client_.check(request_callbacks_, request, Tracing::NullSpan::instance()); + client_.check(request_callbacks_, request, active_span_); EXPECT_CALL(async_request_, cancel()); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().Status), Eq(Tracing::Tags::get().Canceled))); + EXPECT_CALL(*child_span, finishSpan()); client_.cancel(); } // Test the client when the configured cluster is missing/removed. 
TEST_F(ExtAuthzHttpClientTest, NoCluster) { InSequence s; + Tracing::MockSpan* child_span{new Tracing::MockSpan()}; + EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); + EXPECT_CALL(*child_span, + setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(cm_, get(Eq("ext_authz"))).WillOnce(Return(nullptr)); EXPECT_CALL(cm_, httpAsyncClientForCluster("ext_authz")).Times(0); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - client_.check(request_callbacks_, envoy::service::auth::v2::CheckRequest{}, - Tracing::NullSpan::instance()); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); + EXPECT_CALL(*child_span, finishSpan()); + client_.check(request_callbacks_, envoy::service::auth::v2::CheckRequest{}, active_span_); } } // namespace diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 29e4a1097de4..63e889ed6c17 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -91,6 +91,7 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { EXPECT_CALL(context, clusterManager()).Times(1); EXPECT_CALL(context, runtime()).Times(1); EXPECT_CALL(context, scope()).Times(1); + EXPECT_CALL(context, timeSource()).Times(1); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); testing::StrictMock filter_callback; EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 76fed14e164f..b4110b5b9acf 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ 
b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -20,6 +20,9 @@ using Envoy::ProtobufWkt::Empty; namespace Envoy { namespace { +// A magic header value which marks header as not expected. +constexpr char UnexpectedHeaderValue[] = "Unexpected header value"; + class GrpcJsonTranscoderIntegrationTest : public testing::TestWithParam, public HttpIntegrationTest { @@ -135,8 +138,12 @@ class GrpcJsonTranscoderIntegrationTest [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { auto* response = static_cast(context); Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value().getStringView(), - response->headers().get(lower_key)->value().getStringView()); + if (entry.value() == UnexpectedHeaderValue) { + EXPECT_FALSE(response->headers().get(lower_key)); + } else { + EXPECT_EQ(entry.value().getStringView(), + response->headers().get(lower_key)->value().getStringView()); + } return Http::HeaderMap::Iterate::Continue; }, response.get()); @@ -344,6 +351,30 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetError1) { ""); } +// Test an upstream that returns an error in a trailer-only response. 
+TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryErrorConvertedToJson) { + const std::string filter = + R"EOF( + name: envoy.grpc_json_transcoder + config: + proto_descriptor: "{}" + services: "bookstore.Bookstore" + convert_grpc_status: true + )EOF"; + config_helper_.addFilter( + fmt::format(filter, TestEnvironment::runfilesPath("/test/proto/bookstore.descriptor"))); + HttpIntegrationTest::initialize(); + testTranscoding( + Http::TestHeaderMapImpl{ + {":method", "GET"}, {":path", "/shelves/100"}, {":authority", "host"}}, + "", {"shelf: 100"}, {}, Status(Code::NOT_FOUND, "Shelf 100 Not Found"), + Http::TestHeaderMapImpl{{":status", "404"}, + {"content-type", "application/json"}, + {"grpc-status", UnexpectedHeaderValue}, + {"grpc-message", UnexpectedHeaderValue}}, + R"({"code":5,"message":"Shelf 100 Not Found"})"); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryDelete) { HttpIntegrationTest::initialize(); testTranscoding( @@ -399,6 +430,8 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, BindingAndBody) { TEST_P(GrpcJsonTranscoderIntegrationTest, ServerStreamingGet) { HttpIntegrationTest::initialize(); + + // 1: Normal streaming get testTranscoding( Http::TestHeaderMapImpl{ {":method", "GET"}, {":path", "/shelves/1/books"}, {":authority", "host"}}, @@ -408,6 +441,22 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, ServerStreamingGet) { Status(), Http::TestHeaderMapImpl{{":status", "200"}, {"content-type", "application/json"}}, R"([{"id":"1","author":"Neal Stephenson","title":"Readme"})" R"(,{"id":"2","author":"George R.R. Martin","title":"A Game of Thrones"}])"); + + // 2: Empty response (trailers only) from streaming backend. + // Response type is a valid JSON, so content type should be application/json. 
+ // Regression test for github.com/envoyproxy/envoy#5011 + testTranscoding( + Http::TestHeaderMapImpl{ + {":method", "GET"}, {":path", "/shelves/2/books"}, {":authority", "host"}}, + "", {"shelf: 2"}, {}, Status(), + Http::TestHeaderMapImpl{{":status", "200"}, {"content-type", "application/json"}}, "[]"); + + // 3: Empty response (trailers only) from streaming backend, with a gRPC error. + testTranscoding( + Http::TestHeaderMapImpl{ + {":method", "GET"}, {":path", "/shelves/37/books"}, {":authority", "host"}}, + "", {"shelf: 37"}, {}, Status(Code::NOT_FOUND, "Shelf 37 not found"), + Http::TestHeaderMapImpl{{":status", "200"}, {"content-type", "application/json"}}, "[]"); } TEST_P(GrpcJsonTranscoderIntegrationTest, StreamingPost) { @@ -522,6 +571,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, DeepStruct) { // The valid deep struct is parsed successfully. // Since we didn't set the response, it return 503. + // Response body is empty (not a valid JSON), so content type should be application/grpc. 
testTranscoding( Http::TestHeaderMapImpl{ {":method", "POST"}, {":path", "/echoStruct"}, {":authority", "host"}}, diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 35d3bc2cbe60..f661f4609b42 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -308,24 +308,24 @@ TEST_F(GrpcJsonTranscoderConfigTest, InvalidVariableBinding) { class GrpcJsonTranscoderFilterTest : public testing::Test, public GrpcJsonTranscoderFilterTestBase { protected: - GrpcJsonTranscoderFilterTest(const bool match_incoming_request_route = false) - : config_(bookstoreProtoConfig(match_incoming_request_route), *api_), filter_(config_) { + GrpcJsonTranscoderFilterTest(envoy::config::filter::http::transcoder::v2::GrpcJsonTranscoder + proto_config = bookstoreProtoConfig()) + : config_(proto_config, *api_), filter_(config_) { filter_.setDecoderFilterCallbacks(decoder_callbacks_); filter_.setEncoderFilterCallbacks(encoder_callbacks_); } - const envoy::config::filter::http::transcoder::v2::GrpcJsonTranscoder - bookstoreProtoConfig(const bool match_incoming_request_route) { + static const envoy::config::filter::http::transcoder::v2::GrpcJsonTranscoder + bookstoreProtoConfig() { std::string json_string = "{\"proto_descriptor\": \"" + bookstoreDescriptorPath() + "\",\"services\": [\"bookstore.Bookstore\"]}"; auto json_config = Json::Factory::loadFromString(json_string); envoy::config::filter::http::transcoder::v2::GrpcJsonTranscoder proto_config{}; Envoy::Config::FilterJson::translateGrpcJsonTranscoder(*json_config, proto_config); - proto_config.set_match_incoming_request_route(match_incoming_request_route); return proto_config; } - const std::string bookstoreDescriptorPath() { + static const std::string bookstoreDescriptorPath() { return 
TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"); } @@ -574,7 +574,15 @@ TEST_F(GrpcJsonTranscoderFilterTest, ForwardUnaryPostGrpc) { class GrpcJsonTranscoderFilterSkipRecalculatingTest : public GrpcJsonTranscoderFilterTest { public: - GrpcJsonTranscoderFilterSkipRecalculatingTest() : GrpcJsonTranscoderFilterTest(true) {} + GrpcJsonTranscoderFilterSkipRecalculatingTest() + : GrpcJsonTranscoderFilterTest(makeProtoConfig()) {} + +private: + const envoy::config::filter::http::transcoder::v2::GrpcJsonTranscoder makeProtoConfig() { + auto proto_config = bookstoreProtoConfig(); + proto_config.set_match_incoming_request_route(true); + return proto_config; + } }; TEST_F(GrpcJsonTranscoderFilterSkipRecalculatingTest, TranscodingUnaryPostSkipRecalculate) { @@ -734,6 +742,97 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSpli EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(response_trailers)); } +class GrpcJsonTranscoderFilterConvertGrpcStatusTest : public GrpcJsonTranscoderFilterTest { +public: + GrpcJsonTranscoderFilterConvertGrpcStatusTest() + : GrpcJsonTranscoderFilterTest(makeProtoConfig()) {} + + void SetUp() override { + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + Http::TestHeaderMapImpl request_headers{ + {"content-type", "application/json"}, {":method", "POST"}, {":path", "/shelf"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + + Buffer::OwnedImpl request_data{R"({"theme": "Children"})"}; + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(request_data, true)); + + Http::TestHeaderMapImpl continue_headers{{":status", "000"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_.encode100ContinueHeaders(continue_headers)); + } + +private: + const envoy::config::filter::http::transcoder::v2::GrpcJsonTranscoder makeProtoConfig() { + auto proto_config = bookstoreProtoConfig(); + 
proto_config.set_convert_grpc_status(true); + return proto_config; + } +}; + +// Single headers frame with end_stream flag (trailer), no grpc-status-details-bin header. +TEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest, TranscodingTextHeadersInTrailerOnlyResponse) { + std::string expected_response(R"({"code":5,"message":"Resource not found"})"); + EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false)) + .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) { + EXPECT_EQ(expected_response, data.toString()); + })); + + Http::TestHeaderMapImpl response_headers{{":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "Resource not found"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true)); + EXPECT_EQ("404", response_headers.get_(":status")); + EXPECT_EQ("application/json", response_headers.get_("content-type")); + EXPECT_FALSE(response_headers.has("grpc-status")); + EXPECT_FALSE(response_headers.has("grpc-message")); +} + +// Trailer-only response with grpc-status-details-bin header. 
+TEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest, + TranscodingBinaryHeaderInTrailerOnlyResponse) { + std::string expected_response(R"({"code":5,"message":"Resource not found"})"); + EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false)) + .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) { + EXPECT_EQ(expected_response, data.toString()); + })); + + Http::TestHeaderMapImpl response_headers{ + {":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "unused"}, + {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true)); + EXPECT_EQ("404", response_headers.get_(":status")); + EXPECT_EQ("application/json", response_headers.get_("content-type")); + EXPECT_FALSE(response_headers.has("grpc-status")); + EXPECT_FALSE(response_headers.has("grpc-message")); + EXPECT_FALSE(response_headers.has("grpc-status-details-bin")); +} + +// Trailer-only response with grpc-status-details-bin header with details. +// Also tests that a user-defined type from a proto descriptor in config can be used in details. 
+TEST_F(GrpcJsonTranscoderFilterConvertGrpcStatusTest, + TranscodingBinaryHeaderWithDetailsInTrailerOnlyResponse) { + std::string expected_response( + "{\"code\":5,\"message\":\"Error\",\"details\":" + "[{\"@type\":\"type.googleapis.com/helloworld.HelloReply\",\"message\":\"details\"}]}"); + EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false)) + .WillOnce(Invoke([&expected_response](Buffer::Instance& data, bool) { + EXPECT_EQ(expected_response, data.toString()); + })); + + Http::TestHeaderMapImpl response_headers{ + {":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "unused"}, + {"grpc-status-details-bin", + "CAUSBUVycm9yGjYKKXR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5IZWxsb1JlcGx5EgkKB2RldGFpbHM"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, true)); +} + struct GrpcJsonTranscoderFilterPrintTestParam { std::string config_json_; std::string expected_response_; diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index 718c03de16cb..1ecf224aa6df 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -33,7 +33,7 @@ class HttpInspectorTest : public testing::Test { io_handle_(std::make_unique(42)) {} ~HttpInspectorTest() override { io_handle_->close(); } - void init() { + void init(bool include_inline_recv = true) { filter_ = std::make_unique(cfg_); EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_)); @@ -41,11 +41,18 @@ class HttpInspectorTest : public testing::Test { EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(ReturnRef(*io_handle_)); - EXPECT_CALL(dispatcher_, - createFileEvent_(_, _, Event::FileTriggerType::Edge, Event::FileReadyType::Read)) - .WillOnce( - 
DoAll(SaveArg<1>(&file_event_callback_), ReturnNew>())); - filter_->onAccept(cb_); + if (include_inline_recv) { + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Return(Api::SysCallSizeResult{static_cast(0), 0})); + + EXPECT_CALL(dispatcher_, + createFileEvent_(_, _, Event::FileTriggerType::Edge, + Event::FileReadyType::Read | Event::FileReadyType::Closed)) + .WillOnce(DoAll(SaveArg<1>(&file_event_callback_), + ReturnNew>())); + + filter_->onAccept(cb_); + } } NiceMock os_sys_calls_; @@ -68,8 +75,67 @@ TEST_F(HttpInspectorTest, SkipHttpInspectForTLS) { EXPECT_EQ(filter_->onAccept(cb_), Network::FilterStatus::Continue); } +TEST_F(HttpInspectorTest, InlineReadIoError) { + init(/*include_inline_recv=*/false); + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Invoke([](int, void*, size_t, int) -> Api::SysCallSizeResult { + return Api::SysCallSizeResult{ssize_t(-1), 0}; + })); + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0); + EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0); + EXPECT_CALL(socket_, close()).Times(1); + auto accepted = filter_->onAccept(cb_); + EXPECT_EQ(accepted, Network::FilterStatus::StopIteration); + // It's arguable if io error should bump the not_found counter + EXPECT_EQ(0, cfg_->stats().http_not_found_.value()); +} + +TEST_F(HttpInspectorTest, InlineReadInspectHttp10) { + init(/*include_inline_recv=*/false); + const absl::string_view header = + "GET /anything HTTP/1.0\r\nhost: google.com\r\nuser-agent: curl/7.64.0\r\naccept: " + "*/*\r\nx-forwarded-proto: http\r\nx-request-id: " + "a52df4a0-ed00-4a19-86a7-80e5049c6c84\r\nx-envoy-expected-rq-timeout-ms: " + "15000\r\ncontent-length: 0\r\n\r\n"; + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Invoke([&header](int, void* buffer, size_t length, int) -> Api::SysCallSizeResult { + ASSERT(length >= header.size()); + memcpy(buffer, header.data(), header.size()); + return Api::SysCallSizeResult{ssize_t(header.size()), 
0}; + })); + const std::vector alpn_protos{absl::string_view("http/1.0")}; + + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0); + + EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); + auto accepted = filter_->onAccept(cb_); + EXPECT_EQ(accepted, Network::FilterStatus::Continue); + EXPECT_EQ(1, cfg_->stats().http10_found_.value()); +} + +TEST_F(HttpInspectorTest, InlineReadParseError) { + init(/*include_inline_recv=*/false); + const absl::string_view header = + "NOT_A_LEGAL_PREFIX /anything HTTP/1.0\r\nhost: google.com\r\nuser-agent: " + "curl/7.64.0\r\naccept: " + "*/*\r\nx-forwarded-proto: http\r\nx-request-id: " + "a52df4a0-ed00-4a19-86a7-80e5049c6c84\r\nx-envoy-expected-rq-timeout-ms: " + "15000\r\ncontent-length: 0\r\n\r\n"; + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Invoke([&header](int, void* buffer, size_t length, int) -> Api::SysCallSizeResult { + ASSERT(length >= header.size()); + memcpy(buffer, header.data(), header.size()); + return Api::SysCallSizeResult{ssize_t(header.size()), 0}; + })); + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0); + EXPECT_CALL(socket_, setRequestedApplicationProtocols(_)).Times(0); + auto accepted = filter_->onAccept(cb_); + EXPECT_EQ(accepted, Network::FilterStatus::Continue); + EXPECT_EQ(1, cfg_->stats().http_not_found_.value()); +} + TEST_F(HttpInspectorTest, InspectHttp10) { - init(); + init(true); const absl::string_view header = "GET /anything HTTP/1.0\r\nhost: google.com\r\nuser-agent: curl/7.64.0\r\naccept: " "*/*\r\nx-forwarded-proto: http\r\nx-request-id: " @@ -199,7 +265,7 @@ TEST_F(HttpInspectorTest, InspectHttp2) { return Api::SysCallSizeResult{ssize_t(data.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("h2")}; + const std::vector alpn_protos{absl::string_view("h2c")}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -232,7 +298,7 @@ 
TEST_F(HttpInspectorTest, ReadError) { EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { return Api::SysCallSizeResult{ssize_t(-1), ENOTSUP}; })); - EXPECT_CALL(cb_, continueFilterChain(true)); + EXPECT_CALL(cb_, continueFilterChain(false)); file_event_callback_(Event::FileReadyType::Read); EXPECT_EQ(1, cfg_->stats().read_error_.value()); } @@ -240,7 +306,7 @@ TEST_F(HttpInspectorTest, ReadError) { TEST_F(HttpInspectorTest, MultipleReadsHttp2) { init(); - const std::vector alpn_protos = {absl::string_view("h2")}; + const std::vector alpn_protos = {absl::string_view("h2c")}; const std::string header = "505249202a20485454502f322e300d0a0d0a534d0d0a0d0a00000c04000000000000041000000000020000000000" @@ -386,7 +452,10 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1BadProtocol) { init(); - const absl::string_view data = "GET /index HTT\r"; + const std::string valid_header = "GET /index HTTP/1.1\r"; + // offset: 0 10 + const std::string truncate_header = valid_header.substr(0, 14).append("\r"); + { InSequence s; @@ -394,14 +463,14 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1BadProtocol) { return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; })); - for (size_t i = 1; i <= data.length(); i++) { + for (size_t i = 1; i <= truncate_header.length(); i++) { EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) - .WillOnce( - Invoke([&data, i](int, void* buffer, size_t length, int) -> Api::SysCallSizeResult { - ASSERT(length >= data.size()); - memcpy(buffer, data.data(), data.size()); - return Api::SysCallSizeResult{ssize_t(i), 0}; - })); + .WillOnce(Invoke([&truncate_header, i](int, void* buffer, size_t length, + int) -> Api::SysCallSizeResult { + ASSERT(length >= truncate_header.size()); + memcpy(buffer, truncate_header.data(), truncate_header.size()); + return Api::SysCallSizeResult{ssize_t(i), 0}; + })); } } diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc 
b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 683bdfc1c5fd..bfec9121e16f 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -51,7 +51,7 @@ class ProxyProtocolTest : public testing::TestWithParamallocateDispatcher()), socket_(Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true), - connection_handler_(new Server::ConnectionHandlerImpl(ENVOY_LOGGER(), *dispatcher_)), + connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, "test_thread")), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) { connection_handler_->addListener(*this); @@ -890,7 +890,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParamip()->port())), - connection_handler_(new Server::ConnectionHandlerImpl(ENVOY_LOGGER(), *dispatcher_)), + connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, "test_thread")), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) { connection_handler_->addListener(*this); conn_ = dispatcher_->createClientConnection(local_dst_address_, diff --git a/test/extensions/filters/network/mongo_proxy/proxy_test.cc b/test/extensions/filters/network/mongo_proxy/proxy_test.cc index a0e080dca2be..6dfda5703cc7 100644 --- a/test/extensions/filters/network/mongo_proxy/proxy_test.cc +++ b/test/extensions/filters/network/mongo_proxy/proxy_test.cc @@ -387,17 +387,17 @@ TEST_F(MongoProxyFilterTest, CommandStats) { QueryMessagePtr message(new QueryMessageImpl(0, 0)); message->fullCollectionName("db.$cmd"); message->flags(0b1110010); - message->query(Bson::DocumentImpl::create()->addString("foo", "bar")); + message->query(Bson::DocumentImpl::create()->addString("insert", "bar")); filter_->callbacks_->decodeQuery(std::move(message)); })); filter_->onData(fake_data_, false); EXPECT_CALL(store_, deliverHistogramToSinks( 
- Property(&Stats::Metric::name, "test.cmd.foo.reply_num_docs"), 1)); + Property(&Stats::Metric::name, "test.cmd.insert.reply_num_docs"), 1)); EXPECT_CALL(store_, deliverHistogramToSinks( - Property(&Stats::Metric::name, "test.cmd.foo.reply_size"), 22)); + Property(&Stats::Metric::name, "test.cmd.insert.reply_size"), 22)); EXPECT_CALL(store_, deliverHistogramToSinks( - Property(&Stats::Metric::name, "test.cmd.foo.reply_time_ms"), _)); + Property(&Stats::Metric::name, "test.cmd.insert.reply_time_ms"), _)); EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { ReplyMessagePtr message(new ReplyMessageImpl(0, 0)); @@ -408,7 +408,7 @@ TEST_F(MongoProxyFilterTest, CommandStats) { })); filter_->onWrite(fake_data_, false); - EXPECT_EQ(1U, store_.counter("test.cmd.foo.total").value()); + EXPECT_EQ(1U, store_.counter("test.cmd.insert.total").value()); } TEST_F(MongoProxyFilterTest, CallingFunctionStats) { @@ -572,7 +572,7 @@ TEST_F(MongoProxyFilterTest, EmptyActiveQueryList) { QueryMessagePtr message(new QueryMessageImpl(0, 0)); message->fullCollectionName("db.$cmd"); message->flags(0b1110010); - message->query(Bson::DocumentImpl::create()->addString("foo", "bar")); + message->query(Bson::DocumentImpl::create()->addString("query", "bar")); filter_->callbacks_->decodeQuery(std::move(message)); })); filter_->onData(fake_data_, false); diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 6c27bade7766..03e32d788b62 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -42,8 +42,7 @@ class ActiveQuicListenerPeer { } }; -class ActiveQuicListenerTest : public testing::TestWithParam, - protected Logger::Loggable { +class ActiveQuicListenerTest : public testing::TestWithParam { public: ActiveQuicListenerTest() : version_(GetParam()), 
api_(Api::createApiForTest(simulated_time_system_)), @@ -53,7 +52,7 @@ class ActiveQuicListenerTest : public testing::TestWithParamcallbacks_->connection().addConnectionCallbacks( network_connection_callbacks_); }}), - connection_handler_(ENVOY_LOGGER(), *dispatcher_) { + connection_handler_(*dispatcher_, "test_thread") { EXPECT_CALL(listener_config_, listenerFiltersTimeout()); EXPECT_CALL(listener_config_, continueOnListenerFiltersTimeout()); EXPECT_CALL(listener_config_, listenerTag()); @@ -80,8 +79,8 @@ class ActiveQuicListenerTest : public testing::TestWithParam( - *dispatcher_, connection_handler_, ENVOY_LOGGER(), listener_config_, quic_config_); + quic_listener_ = std::make_unique(*dispatcher_, connection_handler_, + listener_config_, quic_config_); simulated_time_system_.sleep(std::chrono::milliseconds(100)); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 60cd220f6978..b42ab88d606f 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -64,7 +64,7 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParam(*dispatcher_), diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index 8b211fa74d10..14841b034e17 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -26,6 +26,7 @@ TEST(ZipkinTracerConfigTest, ZipkinHttpTracer) { config: collector_cluster: fake_cluster collector_endpoint: /api/v1/spans + collector_endpoint_version: HTTP_JSON )EOF"; envoy::config::trace::v2::Tracing configuration; diff --git a/test/extensions/transport_sockets/tls/test_private_key_method_provider.cc b/test/extensions/transport_sockets/tls/test_private_key_method_provider.cc index cf78fbf3a304..995bd7060afb 100644 --- 
a/test/extensions/transport_sockets/tls/test_private_key_method_provider.cc +++ b/test/extensions/transport_sockets/tls/test_private_key_method_provider.cc @@ -99,8 +99,9 @@ static ssl_private_key_result_t rsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t const uint8_t* in, size_t in_len) { TestPrivateKeyConnection* ops = static_cast( SSL_get_ex_data(ssl, TestPrivateKeyMethodProvider::rsaConnectionIndex())); - unsigned char hash[EVP_MAX_MD_SIZE]; - unsigned int hash_len; + unsigned char hash[EVP_MAX_MD_SIZE] = {0}; + unsigned int hash_len = EVP_MAX_MD_SIZE; + std::vector in2; if (!ops) { return ssl_private_key_failure; @@ -119,7 +120,17 @@ static ssl_private_key_result_t rsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t return ssl_private_key_failure; } - if (!calculateDigest(md, in, in_len, hash, &hash_len)) { + in2.assign(in, in + in_len); + + // If crypto error is set, we'll modify the incoming token by flipping + // the bits. + if (ops->test_options_.crypto_error_) { + for (size_t i = 0; i < in_len; i++) { + in2[i] = ~in2[i]; + } + } + + if (!calculateDigest(md, in2.data(), in_len, hash, &hash_len)) { return ssl_private_key_failure; } @@ -128,14 +139,11 @@ static ssl_private_key_result_t rsaPrivateKeySign(SSL* ssl, uint8_t* out, size_t return ssl_private_key_failure; } - if (ops->test_options_.crypto_error_) { - // Flip the bits in the first byte of the digest so that the handshake will fail. - hash[0] ^= hash[0]; - } - // Perform RSA signing. 
if (SSL_is_signature_algorithm_rsa_pss(signature_algorithm)) { - RSA_sign_pss_mgf1(rsa, out_len, out, max_out, hash, hash_len, md, nullptr, -1); + if (!RSA_sign_pss_mgf1(rsa, out_len, out, max_out, hash, hash_len, md, nullptr, -1)) { + return ssl_private_key_failure; + } } else { unsigned int out_len_unsigned; if (!RSA_sign(EVP_MD_type(md), hash, hash_len, out, &out_len_unsigned, rsa)) { diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index 628550fb9cea..51a229e9dfe3 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -13,8 +13,9 @@ #include "test/integration/http_integration.h" namespace Envoy { -static const std::string& AdsIntegrationConfig() { - CONSTRUCT_ON_FIRST_USE(std::string, R"EOF( +static std::string AdsIntegrationConfig() { + // Note: do not use CONSTRUCT_ON_FIRST_USE here! + return R"EOF( dynamic_resources: lds_config: {ads: {}} cds_config: {ads: {}} @@ -38,7 +39,7 @@ static const std::string& AdsIntegrationConfig() { socket_address: address: 127.0.0.1 port_value: 0 -)EOF"); +)EOF"; } class AdsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index f55806361987..9b8efad2b7d7 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -397,7 +397,7 @@ FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket : http_type_(type), socket_(std::move(listen_socket)), api_(Api::createApiForTest(stats_store_)), time_system_(time_system), dispatcher_(api_->allocateDispatcher()), - handler_(new Server::ConnectionHandlerImpl(ENVOY_LOGGER(), *dispatcher_)), + handler_(new Server::ConnectionHandlerImpl(*dispatcher_, "fake_upstream")), allow_unexpected_disconnects_(false), read_disable_on_new_connection_(true), enable_half_close_(enable_half_close), listener_(*this), 
filter_chain_(Network::Test::createEmptyFilterChain(std::move(transport_socket_factory))) { diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index 10b882feef95..603c903b86e1 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -42,7 +42,8 @@ class HttpSubsetLbIntegrationTest : public testing::TestWithParamclear_hosts(); - // Create a load assignment with num_hosts_ entries with metadata split evenly between type=a - // and type=b. + // Create a load assignment with num_hosts_ entries with metadata split evenly between + // type=a and type=b. auto* load_assignment = cluster->mutable_load_assignment(); load_assignment->set_cluster_name(cluster->name()); auto* endpoints = load_assignment->add_endpoints(); @@ -71,7 +72,8 @@ class HttpSubsetLbIntegrationTest : public testing::TestWithParammutable_endpoint(); auto* addr = endpoint->mutable_address()->mutable_socket_address(); - addr->set_address("127.0.0.1"); + addr->set_address(Network::Test::getLoopbackAddressString( + TestEnvironment::getIpVersionsForTest().front())); addr->set_port_value(0); // Assign type metadata based on i. 
diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 74a487a25e82..66185338b8e0 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -33,11 +33,9 @@ std::string normalizeDate(const std::string& s) { return std::regex_replace(s, date_regex, "date: Mon, 01 Jan 2017 00:00:00 GMT"); } -void setAllowAbsoluteUrl( +void setDisallowAbsoluteUrl( envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) { - envoy::api::v2::core::Http1ProtocolOptions options; - options.mutable_allow_absolute_url()->set_value(true); - hcm.mutable_http_protocol_options()->CopyFrom(options); + hcm.mutable_http_protocol_options()->mutable_allow_absolute_url()->set_value(false); }; void setAllowHttp10WithDefaultHost( @@ -52,6 +50,29 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, IntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +// Make sure we have correctly specified per-worker performance stats. +// TODO(mattklein123): We should flesh this test out to a) actually use more than 1 worker and +// b) do some real requests and verify things work correctly on a per-worker basis. I will do this +// in my next change when I add optional CX balancing as it well then be easier to write a +// deterministic test. +TEST_P(IntegrationTest, PerWorkerStats) { + initialize(); + + // Per-worker listener stats. + if (GetParam() == Network::Address::IpVersion::v4) { + EXPECT_NE(nullptr, test_server_->counter("listener.127.0.0.1_0.worker_0.downstream_cx_total")); + } else { + EXPECT_NE(nullptr, test_server_->counter("listener.[__1]_0.worker_0.downstream_cx_total")); + } + + // Main thread admin listener stats. + EXPECT_NE(nullptr, test_server_->counter("listener.admin.main_thread.downstream_cx_total")); + + // Per-thread watchdog stats. 
+ EXPECT_NE(nullptr, test_server_->counter("server.main_thread.watchdog_miss")); + EXPECT_NE(nullptr, test_server_->counter("server.worker_0.watchdog_miss")); +} + TEST_P(IntegrationTest, RouterDirectResponse) { const std::string body = "Response body"; const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", body); @@ -393,6 +414,7 @@ TEST_P(IntegrationTest, NoHost) { } TEST_P(IntegrationTest, BadPath) { + config_helper_.addConfigModifier(&setDisallowAbsoluteUrl); initialize(); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), @@ -407,7 +429,6 @@ TEST_P(IntegrationTest, AbsolutePath) { auto host = config_helper_.createVirtualHost("www.redirect.com", "/"); host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); config_helper_.addVirtualHost(host); - config_helper_.addConfigModifier(&setAllowAbsoluteUrl); initialize(); std::string response; @@ -423,7 +444,6 @@ TEST_P(IntegrationTest, AbsolutePathWithPort) { auto host = config_helper_.createVirtualHost("www.namewithport.com:1234", "/"); host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); config_helper_.addVirtualHost(host); - config_helper_.addConfigModifier(&setAllowAbsoluteUrl); initialize(); std::string response; sendRawHttpAndWaitForResponse( @@ -440,7 +460,6 @@ TEST_P(IntegrationTest, AbsolutePathWithoutPort) { auto host = config_helper_.createVirtualHost("www.namewithport.com:1234", "/"); host.set_require_tls(envoy::api::v2::route::VirtualHost::ALL); config_helper_.addVirtualHost(host); - config_helper_.addConfigModifier(&setAllowAbsoluteUrl); initialize(); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), @@ -460,8 +479,8 @@ TEST_P(IntegrationTest, Connect) { cloned_listener->CopyFrom(*old_listener); old_listener->set_name("http_forward"); }); - // Set the first listener to allow absolute URLs. - config_helper_.addConfigModifier(&setAllowAbsoluteUrl); + // Set the first listener to disallow absolute URLs. 
+ config_helper_.addConfigModifier(&setDisallowAbsoluteUrl); initialize(); std::string response1; diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index c952201db30f..cd8b4de73ea5 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -89,7 +89,7 @@ TEST_P(ProxyProtoIntegrationTest, RouterProxyUnknownLongRequestAndResponseWithBo testRouterRequestAndResponseWithBody(1024, 512, false, &creator); } -TEST_P(ProxyProtoIntegrationTest, OriginalDst) { +TEST_P(ProxyProtoIntegrationTest, DEPRECATED_FEATURE_TEST(OriginalDst)) { // Change the cluster to an original destination cluster. An original destination cluster // ignores the configured hosts, and instead uses the restored destination address from the // incoming (server) connection as the destination address for the outgoing (client) connection. @@ -119,4 +119,34 @@ TEST_P(ProxyProtoIntegrationTest, OriginalDst) { testRouterRequestAndResponseWithBody(1024, 512, false, &creator); } +TEST_P(ProxyProtoIntegrationTest, ClusterProvided) { + // Change the cluster to an original destination cluster. An original destination cluster + // ignores the configured hosts, and instead uses the restored destination address from the + // incoming (server) connection as the destination address for the outgoing (client) connection. 
+ config_helper_.addConfigModifier([&](envoy::config::bootstrap::v2::Bootstrap& bootstrap) -> void { + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + cluster->mutable_hosts()->Clear(); + cluster->set_type(envoy::api::v2::Cluster::ORIGINAL_DST); + cluster->set_lb_policy(envoy::api::v2::Cluster::CLUSTER_PROVIDED); + }); + + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + Network::ClientConnectionPtr conn = makeClientConnection(lookupPort("http")); + // Create proxy protocol line that has the fake upstream address as the destination address. + // This address will become the "restored" address for the server connection and will + // be used as the destination address by the original destination cluster. + std::string proxyLine = fmt::format( + "PROXY {} {} 65535 {}\r\n", + GetParam() == Network::Address::IpVersion::v4 ? "TCP4 1.2.3.4" : "TCP6 1:2:3::4", + Network::Test::getLoopbackAddressString(GetParam()), + fake_upstreams_[0]->localAddress()->ip()->port()); + + Buffer::OwnedImpl buf(proxyLine); + conn->write(buf, false); + return conn; + }; + + testRouterRequestAndResponseWithBody(1024, 512, false, &creator); +} + } // namespace Envoy diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 2118a0e1aaa2..5745603b7ab5 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -252,6 +252,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2019/07/24 7503 43030 44000 add upstream filters to clusters // 2019/08/13 7877 42838 44000 skip EdfScheduler creation if all host weights equal // 2019/09/02 8118 42830 43000 Share symbol-tables in cluster/host stats. + // 2019/09/16 8100 42894 43000 Add transport socket matcher in cluster. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. 
So you @@ -261,7 +262,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // On a local clang8/libstdc++/linux flow, the memory usage was observed in // June 2019 to be 64 bytes higher than it is in CI/release. Your mileage may // vary. - EXPECT_MEMORY_EQ(m_per_cluster, 42830); // 104 bytes higher than a debug build. + EXPECT_MEMORY_EQ(m_per_cluster, 42894); // 104 bytes higher than a debug build. EXPECT_MEMORY_LE(m_per_cluster, 44000); } @@ -287,6 +288,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // ---------- ----- ----------------- ----- // 2019/08/09 7882 35489 36000 Initial version // 2019/09/02 8118 34585 34500 Share symbol-tables in cluster/host stats. + // 2019/09/16 8100 34585 34500 Add transport socket matcher in cluster. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -296,7 +298,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // On a local clang8/libstdc++/linux flow, the memory usage was observed in // June 2019 to be 64 bytes higher than it is in CI/release. Your mileage may // vary. - EXPECT_MEMORY_EQ(m_per_cluster, 34585); // 104 bytes higher than a debug build. + EXPECT_MEMORY_EQ(m_per_cluster, 34649); // 104 bytes higher than a debug build. 
EXPECT_MEMORY_LE(m_per_cluster, 36000); } diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index f415023ba75b..97a27b03300e 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -40,6 +40,7 @@ class MockApi : public Api { MOCK_METHOD0(fileSystem, Filesystem::Instance&()); MOCK_METHOD0(threadFactory, Thread::ThreadFactory&()); MOCK_METHOD0(rootScope, const Stats::Scope&()); + MOCK_METHOD0(processContext, OptProcessContextRef()); testing::NiceMock file_system_; Event::GlobalTimeSystem time_system_; diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 77d2c37d74c6..0fc8439779aa 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -345,6 +345,7 @@ class MockConnectionHandler : public ConnectionHandler { MOCK_METHOD0(stopListeners, void()); MOCK_METHOD0(disableListeners, void()); MOCK_METHOD0(enableListeners, void()); + MOCK_METHOD0(statPrefix, const std::string&()); }; class MockIp : public Address::Ip { diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 388d789a8940..8e55cc6732d3 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -54,6 +54,7 @@ class MockSnapshot : public Snapshot { uint64_t random_value)); MOCK_CONST_METHOD1(get, const std::string&(const std::string& key)); MOCK_CONST_METHOD2(getInteger, uint64_t(const std::string& key, uint64_t default_value)); + MOCK_CONST_METHOD2(getDouble, double(const std::string& key, double default_value)); MOCK_CONST_METHOD0(getLayers, const std::vector&()); }; diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index 92ba5f5456dc..87263612946e 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -72,7 +72,7 @@ MockWatchDog::MockWatchDog() = default; MockWatchDog::~MockWatchDog() = default; MockGuardDog::MockGuardDog() : watch_dog_(new NiceMock()) { - ON_CALL(*this, createWatchDog(_)).WillByDefault(Return(watch_dog_)); + ON_CALL(*this, createWatchDog(_, 
_)).WillByDefault(Return(watch_dog_)); } MockGuardDog::~MockGuardDog() = default; diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index b8398a630b50..bcbc232bd240 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -193,7 +193,8 @@ class MockGuardDog : public GuardDog { ~MockGuardDog() override; // Server::GuardDog - MOCK_METHOD1(createWatchDog, WatchDogSharedPtr(Thread::ThreadId)); + MOCK_METHOD2(createWatchDog, + WatchDogSharedPtr(Thread::ThreadId thread_id, const std::string& thread_name)); MOCK_METHOD1(stopWatching, void(WatchDogSharedPtr wd)); std::shared_ptr watch_dog_; @@ -293,7 +294,9 @@ class MockWorkerFactory : public WorkerFactory { ~MockWorkerFactory() override; // Server::WorkerFactory - WorkerPtr createWorker(OverloadManager&) override { return WorkerPtr{createWorker_()}; } + WorkerPtr createWorker(OverloadManager&, const std::string&) override { + return WorkerPtr{createWorker_()}; + } MOCK_METHOD0(createWorker_, Worker*()); }; @@ -470,7 +473,7 @@ class MockFactoryContext : public virtual FactoryContext { Event::TestTimeSystem& timeSystem() { return time_system_; } Grpc::Context& grpcContext() override { return grpc_context_; } Http::Context& httpContext() override { return http_context_; } - MOCK_METHOD0(processContext, absl::optional>()); + MOCK_METHOD0(processContext, OptProcessContextRef()); MOCK_METHOD0(messageValidationVisitor, ProtobufMessage::ValidationVisitor&()); MOCK_METHOD0(api, Api::Api&()); diff --git a/test/proto/BUILD b/test/proto/BUILD index 08c8384dee44..71f551469cdf 100644 --- a/test/proto/BUILD +++ b/test/proto/BUILD @@ -31,6 +31,9 @@ envoy_proto_descriptor( name = "bookstore_proto_descriptor", srcs = [ "bookstore.proto", + # JSON transcoder doesn't link against ":helloworld_proto_cc", so we can add it to the + # descriptor and test that we can actually transcode types not linked into the test binary. 
+ "helloworld.proto", ], out = "bookstore.descriptor", external_deps = [ diff --git a/test/proto/deprecated.proto b/test/proto/deprecated.proto index 541cab6a9584..444f85b56c1a 100644 --- a/test/proto/deprecated.proto +++ b/test/proto/deprecated.proto @@ -15,4 +15,16 @@ message Base { InnerMessage not_deprecated_message = 5; repeated InnerMessage repeated_message = 6; repeated InnerMessage deprecated_repeated_message = 7 [deprecated = true]; + + // For deprecated enum value testing, stick the enum in a container, to avoid + // the default instantiation of Base having a deprecated-by-default value. + enum DeprecationEnum { + DEPRECATED_DEFAULT = 0 [deprecated = true]; + NOT_DEPRECATED = 1; + DEPRECATED_NOT_DEFAULT = 2 [deprecated = true]; + } + message InnerMessageWithDeprecationEnum { + DeprecationEnum deprecated_enum = 1; + } + InnerMessageWithDeprecationEnum enum_container = 8; } diff --git a/test/server/BUILD b/test/server/BUILD index 161688205c06..e252f5d0f385 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -165,6 +165,7 @@ envoy_cc_test( envoy_cc_test_library( name = "listener_manager_impl_test_lib", hdrs = ["listener_manager_impl_test.h"], + data = ["//test/extensions/transport_sockets/tls/test_data:certs"], deps = [ "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", @@ -172,13 +173,13 @@ envoy_cc_test_library( "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:test_time_lib", + "//test/test_common:threadsafe_singleton_injector_lib", ], ) envoy_cc_test( name = "listener_manager_impl_test", srcs = ["listener_manager_impl_test.cc"], - data = ["//test/extensions/transport_sockets/tls/test_data:certs"], deps = [ ":listener_manager_impl_test_lib", ":utility_lib", @@ -198,7 +199,6 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:ssl_socket_lib", "//source/server:active_raw_udp_listener_config", "//test/test_common:registry_lib", - 
"//test/test_common:threadsafe_singleton_injector_lib", ], ) @@ -209,7 +209,9 @@ envoy_cc_test( tags = ["nofips"], deps = [ ":listener_manager_impl_test_lib", + ":utility_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", + "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//test/test_common:threadsafe_singleton_injector_lib", ], diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index a6dadb2b9e54..40d9f89489bb 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -32,7 +32,7 @@ namespace { class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable { public: ConnectionHandlerTest() - : handler_(new ConnectionHandlerImpl(ENVOY_LOGGER(), dispatcher_)), + : handler_(new ConnectionHandlerImpl(dispatcher_, "test")), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) {} class TestListener : public Network::ListenerConfig, public LinkedObject { @@ -379,7 +379,18 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { EXPECT_CALL(dispatcher_, createServerConnection_(_, _)).WillOnce(Return(connection)); EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillOnce(Return(true)); listener_callbacks1->onAccept(Network::ConnectionSocketPtr{accepted_socket}, true); + + // Verify per-listener connection stats. 
EXPECT_EQ(1UL, handler_->numConnections()); + EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, "test.downstream_cx_total")->value()); + EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, "test.downstream_cx_active")->value()); + + connection->close(Network::ConnectionCloseType::NoFlush); + dispatcher_.clearDeferredDeleteList(); + EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, "test.downstream_cx_active")->value()); EXPECT_CALL(*listener2, onDestroy()); EXPECT_CALL(*listener1, onDestroy()); diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 66f5b117121f..011ba8dbfcb8 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -98,7 +98,7 @@ class GuardDogDeathTest : public GuardDogTestBase { void SetupForDeath() { InSequence s; initGuardDog(fakestats_, config_kill_); - unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); guard_dog_->forceCheckForTest(); time_system_->sleep(std::chrono::milliseconds(99)); // 1 ms shy of death. 
} @@ -110,9 +110,11 @@ class GuardDogDeathTest : public GuardDogTestBase { void SetupForMultiDeath() { InSequence s; initGuardDog(fakestats_, config_multikill_); - auto unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto unpet_dog_ = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); guard_dog_->forceCheckForTest(); - auto second_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto second_dog_ = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); guard_dog_->forceCheckForTest(); time_system_->sleep(std::chrono::milliseconds(499)); // 1 ms shy of multi-death. } @@ -177,8 +179,9 @@ TEST_P(GuardDogAlmostDeadTest, NearDeathTest) { // there is no death. The positive case is covered in MultiKillDeathTest. InSequence s; initGuardDog(fakestats_, config_multikill_); - auto unpet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); - auto pet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto unpet_dog = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + auto pet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); // This part "waits" 600 milliseconds while one dog is touched every 100, and // the other is not. 
600ms is over the threshold of 500ms for multi-kill but // only one is nonresponsive, so there should be no kill (single kill @@ -194,6 +197,23 @@ class GuardDogMissTest : public GuardDogTestBase { protected: GuardDogMissTest() : config_miss_(500, 1000, 0, 0), config_mega_(1000, 500, 0, 0) {} + void checkMiss(uint64_t count, const std::string& descriptor) { + EXPECT_EQ(count, TestUtility::findCounter(stats_store_, "server.watchdog_miss")->value()) + << descriptor; + EXPECT_EQ(count, + TestUtility::findCounter(stats_store_, "server.test_thread.watchdog_miss")->value()) + << descriptor; + } + + void checkMegaMiss(uint64_t count, const std::string& descriptor) { + EXPECT_EQ(count, TestUtility::findCounter(stats_store_, "server.watchdog_mega_miss")->value()) + << descriptor; + EXPECT_EQ( + count, + TestUtility::findCounter(stats_store_, "server.test_thread.watchdog_mega_miss")->value()) + << descriptor; + } + NiceMock config_miss_; NiceMock config_mega_; }; @@ -205,17 +225,18 @@ TEST_P(GuardDogMissTest, MissTest) { // This test checks the actual collected statistics after doing some timer // advances that should and shouldn't increment the counters. 
initGuardDog(stats_store_, config_miss_); + auto unpet_dog = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); // We'd better start at 0: - EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_miss").value()); - auto unpet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + checkMiss(0, "MissTest check 1"); // At 300ms we shouldn't have hit the timeout yet: time_system_->sleep(std::chrono::milliseconds(300)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_miss").value()); + checkMiss(0, "MissTest check 2"); // This should push it past the 500ms limit: time_system_->sleep(std::chrono::milliseconds(250)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(1UL, stats_store_.counter("server.watchdog_miss").value()); + checkMiss(1, "MissTest check 3"); guard_dog_->stopWatching(unpet_dog); unpet_dog = nullptr; } @@ -229,17 +250,18 @@ TEST_P(GuardDogMissTest, MegaMissTest) { // This test checks the actual collected statistics after doing some timer // advances that should and shouldn't increment the counters. 
initGuardDog(stats_store_, config_mega_); - auto unpet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto unpet_dog = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); // We'd better start at 0: - EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_mega_miss").value()); + checkMegaMiss(0, "MegaMissTest check 1"); // This shouldn't be enough to increment the stat: time_system_->sleep(std::chrono::milliseconds(499)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_mega_miss").value()); + checkMegaMiss(0, "MegaMissTest check 2"); // Just 2ms more will make it greater than 500ms timeout: time_system_->sleep(std::chrono::milliseconds(2)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(1UL, stats_store_.counter("server.watchdog_mega_miss").value()); + checkMegaMiss(1, "MegaMissTest check 3"); guard_dog_->stopWatching(unpet_dog); unpet_dog = nullptr; } @@ -254,7 +276,8 @@ TEST_P(GuardDogMissTest, MissCountTest) { // spurious condition_variable wakeup causes the counter to get incremented // more than it should be. initGuardDog(stats_store_, config_miss_); - auto sometimes_pet_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto sometimes_pet_dog = + guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); // These steps are executed once without ever touching the watchdog. // Then the last step is to touch the watchdog and repeat the steps. // This verifies that the behavior is reset back to baseline after a touch. 
@@ -263,17 +286,17 @@ TEST_P(GuardDogMissTest, MissCountTest) { // This shouldn't be enough to increment the stat: time_system_->sleep(std::chrono::milliseconds(499)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(i, stats_store_.counter("server.watchdog_miss").value()); + checkMiss(i, "MissCountTest check 1"); // And if we force re-execution of the loop it still shouldn't be: guard_dog_->forceCheckForTest(); - EXPECT_EQ(i, stats_store_.counter("server.watchdog_miss").value()); + checkMiss(i, "MissCountTest check 2"); // Just 2ms more will make it greater than 500ms timeout: time_system_->sleep(std::chrono::milliseconds(2)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(i + 1, stats_store_.counter("server.watchdog_miss").value()); + checkMiss(i + 1, "MissCountTest check 3"); // Spurious wakeup, we should still only have one miss counted. guard_dog_->forceCheckForTest(); - EXPECT_EQ(i + 1, stats_store_.counter("server.watchdog_miss").value()); + checkMiss(i + 1, "MissCountTest check 4"); // When we finally touch the dog we should get one more increment once the // timeout value expires: sometimes_pet_dog->touch(); @@ -281,10 +304,10 @@ TEST_P(GuardDogMissTest, MissCountTest) { time_system_->sleep(std::chrono::milliseconds(1000)); sometimes_pet_dog->touch(); // Make sure megamiss still works: - EXPECT_EQ(0UL, stats_store_.counter("server.watchdog_mega_miss").value()); + checkMegaMiss(0UL, "MissCountTest check 5"); time_system_->sleep(std::chrono::milliseconds(1500)); guard_dog_->forceCheckForTest(); - EXPECT_EQ(1UL, stats_store_.counter("server.watchdog_mega_miss").value()); + checkMegaMiss(1UL, "MissCountTest check 6"); guard_dog_->stopWatching(sometimes_pet_dog); sometimes_pet_dog = nullptr; @@ -314,7 +337,8 @@ TEST_P(GuardDogTestBase, WatchDogThreadIdTest) { NiceMock stats; NiceMock config(100, 90, 1000, 500); initGuardDog(stats, config); - auto watched_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId()); + auto watched_dog = + 
guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); EXPECT_EQ(watched_dog->threadId().debugString(), api_->threadFactory().currentThreadId().debugString()); guard_dog_->stopWatching(watched_dog); diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 38b87e72fb7b..f6636b7b6f1c 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -947,11 +947,11 @@ TEST_P(AdminInstanceTest, Runtime) { Runtime::MockLoader loader; auto layer1 = std::make_unique>(); auto layer2 = std::make_unique>(); - Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}}}, - {"extra_key", {"bar", {}, {}, {}}}}; - Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}}}, - {"int_key", {"1", 1, {}, {}}}, - {"other_key", {"bar", {}, {}, {}}}}; + Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}, {}}}, + {"extra_key", {"bar", {}, {}, {}, {}}}}; + Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}, {}}}, + {"int_key", {"1", 1, {}, {}, {}}}, + {"other_key", {"bar", {}, {}, {}, {}}}}; ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer1"})); ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1)); diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 1cedee3abb09..a5ffe1378ace 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -411,8 +411,7 @@ TEST_F(LdsApiTest, FailureSubscription) { setup(); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, - {}); + lds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {}); EXPECT_EQ("", lds_->versionInfo()); } diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index ad0f08d1a8dd..b7805b5a9cc7 100644 --- 
a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -1,44 +1,80 @@ +#include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" + #include "test/server/listener_manager_impl_test.h" +#include "test/server/utility.h" #include "test/test_common/threadsafe_singleton_injector.h" -using testing::AtLeast; - namespace Envoy { namespace Server { namespace { -class ListenerManagerImplQuicOnlyTest : public ListenerManagerImplTest { -protected: - NiceMock os_sys_calls_; - TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; -}; +class ListenerManagerImplQuicOnlyTest : public ListenerManagerImplTest {}; -TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactory) { - const std::string proto_text = R"EOF( -address: { - socket_address: { +TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { + const std::string yaml = TestEnvironment::substitute(R"EOF( +address: + socket_address: + address: 127.0.0.1 protocol: UDP - address: "127.0.0.1" port_value: 1234 - } -} -filter_chains: {} -udp_listener_config: { +filter_chains: +- filter_chain_match: + transport_protocol: "quic" + filters: [] + transport_socket: + name: quic + config: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + verify_subject_alt_name: + - localhost + - 127.0.0.1 +udp_listener_config: udp_listener_name: "quiche_quic_listener" - config: {} -} - )EOF"; - envoy::api::v2::Listener listener_proto; - EXPECT_TRUE(Protobuf::TextFormat::ParseFromString(proto_text, &listener_proto)); + )EOF", + Network::Address::IpVersion::v4); + envoy::api::v2::Listener listener_proto = 
parseListenerFromV2Yaml(yaml); EXPECT_CALL(server_.random_, uuid()); - EXPECT_CALL(listener_factory_, - createListenSocket(_, Network::Address::SocketType::Datagram, _, true)); - EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)).Times(testing::AtLeast(1)); - EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); + expectCreateListenSocket(envoy::api::v2::core::SocketOption::STATE_PREBIND, +#ifdef SO_RXQ_OVFL + /* expected_num_options */ 2); +#else + /* expected_num_options */ 1); +#endif + expectSetsockopt(os_sys_calls_, + /* expected_sockopt_level */ IPPROTO_IP, + /* expected_sockopt_name */ ENVOY_IP_PKTINFO, + /* expected_value */ 1, + /* expected_num_calls */ 1); +#ifdef SO_RXQ_OVFL + expectSetsockopt(os_sys_calls_, + /* expected_sockopt_level */ SOL_SOCKET, + /* expected_sockopt_name */ SO_RXQ_OVFL, + /* expected_value */ 1, + /* expected_num_calls */ 1); +#endif + manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); - EXPECT_NE(nullptr, manager_->listeners()[0].get().udpListenerFactory()); + EXPECT_FALSE(manager_->listeners()[0].get().udpListenerFactory()->isTransportConnectionless()); + + // No filter chain found with non-matching transport protocol. 
+ EXPECT_EQ(nullptr, findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111)); + + auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "quic", {}, "8.8.8.8", 111); + ASSERT_NE(nullptr, filter_chain); + auto& quic_socket_factory = dynamic_cast( + filter_chain->transportSocketFactory()); + EXPECT_TRUE(quic_socket_factory.implementsSecureTransport()); + EXPECT_TRUE(quic_socket_factory.serverContextConfig().isReady()); } } // namespace diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 2840964f74a6..12c436b8ad60 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -13,8 +13,6 @@ #include "common/config/metadata.h" #include "common/network/address_impl.h" #include "common/network/io_socket_handle_impl.h" -#include "common/network/listen_socket_impl.h" -#include "common/network/socket_option_impl.h" #include "common/network/utility.h" #include "common/protobuf/protobuf.h" @@ -23,7 +21,6 @@ #include "test/server/utility.h" #include "test/test_common/registry.h" -#include "test/test_common/threadsafe_singleton_injector.h" #include "test/test_common/utility.h" #include "absl/strings/escaping.h" @@ -39,67 +36,6 @@ namespace { class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { public: - ListenerManagerImplWithRealFiltersTest() { - // Use real filter loading by default. 
- ON_CALL(listener_factory_, createNetworkFilterFactoryList(_, _)) - .WillByDefault(Invoke( - [](const Protobuf::RepeatedPtrField& filters, - Configuration::FactoryContext& context) -> std::vector { - return ProdListenerComponentFactory::createNetworkFilterFactoryList_(filters, - context); - })); - ON_CALL(listener_factory_, createListenerFilterFactoryList(_, _)) - .WillByDefault(Invoke( - [](const Protobuf::RepeatedPtrField& filters, - Configuration::ListenerFactoryContext& context) - -> std::vector { - return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, - context); - })); - ON_CALL(listener_factory_, createUdpListenerFilterFactoryList(_, _)) - .WillByDefault(Invoke( - [](const Protobuf::RepeatedPtrField& filters, - Configuration::ListenerFactoryContext& context) - -> std::vector { - return ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(filters, - context); - })); - - socket_ = std::make_unique>(); - local_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); - remote_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); - EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); - } - - const Network::FilterChain* - findFilterChain(uint16_t destination_port, const std::string& destination_address, - const std::string& server_name, const std::string& transport_protocol, - const std::vector& application_protocols, - const std::string& source_address, uint16_t source_port) { - if (absl::StartsWith(destination_address, "/")) { - local_address_.reset(new Network::Address::PipeInstance(destination_address)); - } else { - local_address_ = - Network::Utility::parseInternetAddress(destination_address, destination_port); - } - ON_CALL(*socket_, localAddress()).WillByDefault(ReturnRef(local_address_)); - - ON_CALL(*socket_, requestedServerName()).WillByDefault(Return(absl::string_view(server_name))); - ON_CALL(*socket_, detectedTransportProtocol()) - 
.WillByDefault(Return(absl::string_view(transport_protocol))); - ON_CALL(*socket_, requestedApplicationProtocols()) - .WillByDefault(ReturnRef(application_protocols)); - - if (absl::StartsWith(source_address, "/")) { - remote_address_.reset(new Network::Address::PipeInstance(source_address)); - } else { - remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port); - } - ON_CALL(*socket_, remoteAddress()).WillByDefault(ReturnRef(remote_address_)); - - return manager_->listeners().back().get().filterChainManager().findFilterChain(*socket_); - } - /** * Create an IPv4 listener with a given name. */ @@ -114,41 +50,6 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { return listener; } - /** - * Validate that createListenSocket is called once with the expected options. - */ - void - expectCreateListenSocket(const envoy::api::v2::core::SocketOption::SocketState& expected_state, - Network::Socket::Options::size_type expected_num_options) { - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)) - .WillOnce(Invoke([this, expected_num_options, &expected_state]( - Network::Address::InstanceConstSharedPtr, Network::Address::SocketType, - const Network::Socket::OptionsSharedPtr& options, - bool) -> Network::SocketSharedPtr { - EXPECT_NE(options.get(), nullptr); - EXPECT_EQ(options->size(), expected_num_options); - EXPECT_TRUE( - Network::Socket::applyOptions(options, *listener_factory_.socket_, expected_state)); - return listener_factory_.socket_; - })); - } - - /** - * Validate that setsockopt() is called the expected number of times with the expected options. 
- */ - void expectSetsockopt(NiceMock& os_sys_calls, int expected_sockopt_level, - int expected_sockopt_name, int expected_value, - uint32_t expected_num_calls = 1) { - EXPECT_CALL(os_sys_calls, - setsockopt_(_, expected_sockopt_level, expected_sockopt_name, _, sizeof(int))) - .Times(expected_num_calls) - .WillRepeatedly( - Invoke([expected_value](int, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(expected_value, *static_cast(optval)); - return 0; - })); - } - /** * Used by some tests below to validate that, if a given socket option is valid on this platform * and set in the Listener, it should result in a call to setsockopt() with the appropriate @@ -170,15 +71,6 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { EXPECT_EQ(0U, manager_->listeners().size()); } } - -protected: - NiceMock os_sys_calls_; - TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; - -private: - std::unique_ptr socket_; - Network::Address::InstanceConstSharedPtr local_address_; - Network::Address::InstanceConstSharedPtr remote_address_; }; class MockLdsApi : public LdsApi { diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 698982d93a7f..9247b95736bf 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -1,5 +1,8 @@ #include "envoy/admin/v2alpha/config_dump.pb.h" +#include "common/network/listen_socket_impl.h" +#include "common/network/socket_option_impl.h" + #include "server/configuration_impl.h" #include "server/listener_manager_impl.h" @@ -7,6 +10,7 @@ #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -38,6 +42,36 @@ class ListenerManagerImplTest : public testing::Test { EXPECT_CALL(worker_factory_, createWorker_()).WillOnce(Return(worker_)); 
manager_ = std::make_unique(server_, listener_factory_, worker_factory_, false); + + // Use real filter loading by default. + ON_CALL(listener_factory_, createNetworkFilterFactoryList(_, _)) + .WillByDefault(Invoke( + [](const Protobuf::RepeatedPtrField& filters, + Configuration::FactoryContext& context) -> std::vector { + return ProdListenerComponentFactory::createNetworkFilterFactoryList_(filters, + context); + })); + ON_CALL(listener_factory_, createListenerFilterFactoryList(_, _)) + .WillByDefault(Invoke( + [](const Protobuf::RepeatedPtrField& filters, + Configuration::ListenerFactoryContext& context) + -> std::vector { + return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, + context); + })); + ON_CALL(listener_factory_, createUdpListenerFilterFactoryList(_, _)) + .WillByDefault(Invoke( + [](const Protobuf::RepeatedPtrField& filters, + Configuration::ListenerFactoryContext& context) + -> std::vector { + return ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(filters, + context); + })); + + local_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); + remote_address_.reset(new Network::Address::Ipv4Instance("127.0.0.1", 1234)); + EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); + socket_ = std::make_unique>(); } /** @@ -76,6 +110,71 @@ class ListenerManagerImplTest : public testing::Test { return raw_listener; } + const Network::FilterChain* + findFilterChain(uint16_t destination_port, const std::string& destination_address, + const std::string& server_name, const std::string& transport_protocol, + const std::vector& application_protocols, + const std::string& source_address, uint16_t source_port) { + if (absl::StartsWith(destination_address, "/")) { + local_address_.reset(new Network::Address::PipeInstance(destination_address)); + } else { + local_address_ = + Network::Utility::parseInternetAddress(destination_address, destination_port); + } + 
ON_CALL(*socket_, localAddress()).WillByDefault(ReturnRef(local_address_)); + + ON_CALL(*socket_, requestedServerName()).WillByDefault(Return(absl::string_view(server_name))); + ON_CALL(*socket_, detectedTransportProtocol()) + .WillByDefault(Return(absl::string_view(transport_protocol))); + ON_CALL(*socket_, requestedApplicationProtocols()) + .WillByDefault(ReturnRef(application_protocols)); + + if (absl::StartsWith(source_address, "/")) { + remote_address_.reset(new Network::Address::PipeInstance(source_address)); + } else { + remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port); + } + ON_CALL(*socket_, remoteAddress()).WillByDefault(ReturnRef(remote_address_)); + + return manager_->listeners().back().get().filterChainManager().findFilterChain(*socket_); + } + + /** + * Validate that createListenSocket is called once with the expected options. + */ + void + expectCreateListenSocket(const envoy::api::v2::core::SocketOption::SocketState& expected_state, + Network::Socket::Options::size_type expected_num_options) { + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, true)) + .WillOnce(Invoke([this, expected_num_options, + &expected_state](const Network::Address::InstanceConstSharedPtr&, + Network::Address::SocketType, + const Network::Socket::OptionsSharedPtr& options, + bool) -> Network::SocketSharedPtr { + EXPECT_NE(options.get(), nullptr); + EXPECT_EQ(options->size(), expected_num_options); + EXPECT_TRUE( + Network::Socket::applyOptions(options, *listener_factory_.socket_, expected_state)); + return listener_factory_.socket_; + })); + } + + /** + * Validate that setsockopt() is called the expected number of times with the expected options. 
+ */ + void expectSetsockopt(NiceMock& os_sys_calls, int expected_sockopt_level, + int expected_sockopt_name, int expected_value, + uint32_t expected_num_calls = 1) { + EXPECT_CALL(os_sys_calls, + setsockopt_(_, expected_sockopt_level, expected_sockopt_name, _, sizeof(int))) + .Times(expected_num_calls) + .WillRepeatedly( + Invoke([expected_value](int, int, int, const void* optval, socklen_t) -> int { + EXPECT_EQ(expected_value, *static_cast(optval)); + return 0; + })); + } + void checkStats(uint64_t added, uint64_t modified, uint64_t removed, uint64_t warming, uint64_t active, uint64_t draining) { EXPECT_EQ(added, server_.stats_store_.counter("listener_manager.listener_added").value()); @@ -105,6 +204,8 @@ class ListenerManagerImplTest : public testing::Test { EXPECT_EQ(expected_listeners_config_dump.DebugString(), listeners_config_dump.DebugString()); } + NiceMock os_sys_calls_; + TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; NiceMock server_; NiceMock listener_factory_; MockWorker* worker_ = new MockWorker(); @@ -113,6 +214,9 @@ class ListenerManagerImplTest : public testing::Test { NiceMock guard_dog_; Event::SimulatedTimeSystem time_system_; Api::ApiPtr api_; + Network::Address::InstanceConstSharedPtr local_address_; + Network::Address::InstanceConstSharedPtr remote_address_; + std::unique_ptr socket_; }; } // namespace Server diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index 1c9273ddb12c..c7bc36e70941 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -28,7 +28,7 @@ class WorkerImplTest : public testing::Test { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher()), no_exit_timer_(dispatcher_->createTimer([]() -> void {})), worker_(tls_, hooks_, std::move(dispatcher_), Network::ConnectionHandlerPtr{handler_}, - overload_manager_, *api_) { + overload_manager_, *api_, "worker_test") { // In the real worker the watchdog has timers that prevent exit. 
Here we need to prevent event // loop exit since we use mock timers. no_exit_timer_->enableTimer(std::chrono::hours(1)); diff --git a/test/server/zipkin_tracing.yaml b/test/server/zipkin_tracing.yaml index a8fd31b5aadf..8a525f77f1cf 100644 --- a/test/server/zipkin_tracing.yaml +++ b/test/server/zipkin_tracing.yaml @@ -8,3 +8,4 @@ tracing: config: collector_cluster: zipkin collector_endpoint: "/api/v1/spans" + collector_endpoint_version: HTTP_JSON diff --git a/test/tools/router_check/coverage.cc b/test/tools/router_check/coverage.cc index 3e81b105097f..92920864a90d 100644 --- a/test/tools/router_check/coverage.cc +++ b/test/tools/router_check/coverage.cc @@ -53,7 +53,25 @@ double Coverage::report() { return 100 * static_cast(covered_routes_.size()) / num_routes; } +void Coverage::printMissingTests(const std::set& all_route_names, + const std::set& covered_route_names) { + std::set missing_route_names; + std::set_difference(all_route_names.begin(), all_route_names.end(), covered_route_names.begin(), + covered_route_names.end(), + std::inserter(missing_route_names, missing_route_names.end())); + for (const auto& host : route_config_.virtual_hosts()) { + for (const auto& route : host.routes()) { + if (missing_route_names.find(route.name()) != missing_route_names.end()) { + std::cout << "Missing test for host: " << host.name() + << ", route: " << route.match().DebugString() << std::endl; + } + } + } +} + double Coverage::detailedReport() { + std::set all_route_names; + std::set covered_route_names; uint64_t num_routes = 0; for (const auto& host : route_config_.virtual_hosts()) { for (const auto& route : host.routes()) { @@ -62,12 +80,15 @@ double Coverage::detailedReport() { } else { num_routes += 1; } + all_route_names.emplace(route.name()); } } double cumulative_coverage = 0; for (auto& covered_route : covered_routes_) { cumulative_coverage += covered_route->report(); + covered_route_names.emplace(covered_route->route().routeName()); } + 
printMissingTests(all_route_names, covered_route_names); return 100 * cumulative_coverage / num_routes; } diff --git a/test/tools/router_check/coverage.h b/test/tools/router_check/coverage.h index c2327449d89b..14c993a8eddc 100644 --- a/test/tools/router_check/coverage.h +++ b/test/tools/router_check/coverage.h @@ -17,6 +17,7 @@ class RouteCoverage : Logger::Loggable { void setHostRewriteCovered() { host_rewrite_covered_ = true; } void setRedirectPathCovered() { redirect_path_covered_ = true; } bool covers(const Envoy::Router::RouteEntry* route) { return &route_ == route; } + const Envoy::Router::RouteEntry& route() { return route_; } private: const Envoy::Router::RouteEntry& route_; @@ -45,6 +46,8 @@ class Coverage : Logger::Loggable { void markRedirectPathCovered(const Envoy::Router::RouteEntry& route); double report(); double detailedReport(); + void printMissingTests(const std::set& all_route_names, + const std::set& covered_route_names); private: RouteCoverage& coveredRoute(const Envoy::Router::RouteEntry& route); diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 0547c15e03be..e5a53aa9a6dc 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -8,6 +8,7 @@ #include "common/network/utility.h" #include "common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_impl.h" #include "common/stream_info/stream_info_impl.h" #include "test/test_common/printers.h" @@ -71,6 +72,7 @@ RouterCheckTool RouterCheckTool::create(const std::string& router_config_file, auto stats = std::make_unique(); auto api = Api::createApiForTest(*stats); TestUtility::loadFromFile(router_config_file, route_config, *api); + assignUniqueRouteNames(route_config); auto factory_context = std::make_unique>(); auto config = std::make_unique(route_config, *factory_context, false); @@ -84,6 +86,15 @@ RouterCheckTool RouterCheckTool::create(const std::string& 
router_config_file, std::move(api), Coverage(route_config)); } +void RouterCheckTool::assignUniqueRouteNames(envoy::api::v2::RouteConfiguration& route_config) { + Runtime::RandomGeneratorImpl random; + for (auto& host : *route_config.mutable_virtual_hosts()) { + for (auto& route : *host.mutable_routes()) { + route.set_name(random.uuid()); + } + } +} + RouterCheckTool::RouterCheckTool( std::unique_ptr> factory_context, std::unique_ptr config, std::unique_ptr stats, diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h index 78352e86e5d5..17635b2cd898 100644 --- a/test/tools/router_check/router.h +++ b/test/tools/router_check/router.h @@ -106,6 +106,11 @@ class RouterCheckTool : Logger::Loggable { std::unique_ptr config, std::unique_ptr stats, Api::ApiPtr api, Coverage coverage); + /** + * Set UUID as the name for each route for detecting missing tests during the coverage check. + */ + static void assignUniqueRouteNames(envoy::api::v2::RouteConfiguration& route_config); + bool compareCluster(ToolConfig& tool_config, const std::string& expected); bool compareCluster(ToolConfig& tool_config, const envoy::RouterCheckToolSchema::ValidationAssert& expected); diff --git a/test/tools/router_check/test/route_tests.sh b/test/tools/router_check/test/route_tests.sh index 74a4eeccc0df..26a5a2a010d6 100755 --- a/test/tools/router_check/test/route_tests.sh +++ b/test/tools/router_check/test/route_tests.sh @@ -86,3 +86,11 @@ FAILURE_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/TestRoutes.yaml" "-t" "${PAT if [[ "${FAILURE_OUTPUT}" != *"Test_2"*"expected: [cluster1], actual: [instant-server], test type: cluster_name"* ]] || [[ "${FAILURE_OUTPUT}" == *"Test_1"* ]]; then exit 1 fi + +# Missing test results +echo "testing missing tests output test cases" +MISSING_OUTPUT=$("${PATH_BIN}" "-c" "${PATH_CONFIG}/TestRoutes.yaml" "-t" "${PATH_CONFIG}/TestRoutes.golden.proto.json" "--details" "--useproto" "--covall" 2>&1) || + echo "${MISSING_OUTPUT:-no-output}" +if 
[[ "${MISSING_OUTPUT}" != *"Missing test for host: www2_staging, route: prefix: \"/\""*"Missing test for host: default, route: prefix: \"/api/application_data\""* ]]; then + exit 1 +fi diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py new file mode 100755 index 000000000000..3c49dd19d895 --- /dev/null +++ b/tools/api/generate_go_protobuf.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 + +from subprocess import check_output +from subprocess import check_call +import glob +import os +import shutil +import sys +import re + +TARGETS = '@envoy_api//...' +IMPORT_BASE = 'github.com/envoyproxy/go-control-plane' +OUTPUT_BASE = 'build_go' +REPO_BASE = 'go-control-plane' +BRANCH = 'master' +MIRROR_MSG = 'Mirrored from envoyproxy/envoy @ ' +USER_NAME = 'go-control-plane(CircleCI)' +USER_EMAIL = 'go-control-plane@users.noreply.github.com' + + +def generateProtobufs(output): + bazel_bin = check_output(['bazel', 'info', 'bazel-bin']).decode().strip() + go_protos = check_output([ + 'bazel', + 'query', + 'kind("go_proto_library", %s)' % TARGETS, + ]).split() + + # Each rule has the form @envoy_api//foo/bar:baz_go_proto. + # First build all the rules to ensure we have the output files. 
+ check_call(['bazel', 'build', '-c', 'fastbuild'] + go_protos) + + for rule in go_protos: + # Example rule: + # @envoy_api//envoy/config/bootstrap/v2:pkg_go_proto + # + # Example generated directory: + # bazel-bin/external/envoy_api/envoy/config/bootstrap/v2/linux_amd64_stripped/pkg_go_proto%/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2/ + # + # Example output directory: + # go_out/envoy/config/bootstrap/v2 + rule_dir, proto = rule.decode()[len('@envoy_api//'):].rsplit(':', 1) + input_dir = os.path.join(bazel_bin, 'external', 'envoy_api', rule_dir, 'linux_amd64_stripped', + proto + '%', IMPORT_BASE, rule_dir) + input_files = glob.glob(os.path.join(input_dir, '*.go')) + output_dir = os.path.join(output, rule_dir) + + # Ensure the output directory exists + os.makedirs(output_dir, 0o755, exist_ok=True) + for generated_file in input_files: + shutil.copy(generated_file, output_dir) + print('Go artifacts placed into: ' + output) + + +def git(repo, *args): + cmd = ['git'] + if repo: + cmd = cmd + ['-C', repo] + for arg in args: + cmd = cmd + [arg] + return check_output(cmd).decode() + + +def cloneGoProtobufs(repo): + # Create a local clone of go-control-plane + git(None, 'clone', 'git@github.com:envoyproxy/go-control-plane', repo) + git(repo, 'fetch') + git(repo, 'checkout', '-B', BRANCH, 'origin/master') + + +def findLastSyncSHA(repo): + # Determine last envoyproxy/envoy SHA in envoyproxy/go-control-plane + last_commit = git(repo, 'log', '--grep=' + MIRROR_MSG, '-n', '1', '--format=%B').strip() + # Initial SHA from which the APIs start syncing. Prior to that it was done manually. 
+ if last_commit == "": + return 'e7f0b7176efdc65f96eb1697b829d1e6187f4502' + m = re.search(MIRROR_MSG + '(\w+)', last_commit) + return m.group(1) + + +def updatedSinceSHA(repo, last_sha): + # Determine if there are changes to API since last SHA + return git(None, 'rev-list', '%s..HEAD' % last_sha, 'api/envoy').split() + + +def syncGoProtobufs(output, repo): + # Sync generated content against repo and return true if there is a commit necessary + dst = os.path.join(repo, 'envoy') + # Remove subtree at envoy in repo + git(repo, 'rm', '-r', 'envoy') + # Copy subtree at envoy from output to repo + shutil.copytree(os.path.join(output, 'envoy'), dst) + + +def publishGoProtobufs(repo, sha): + # Publish generated files with the last SHA changes to API + git(repo, 'config', 'user.name', USER_NAME) + git(repo, 'config', 'user.email', USER_EMAIL) + git(repo, 'add', 'envoy') + git(repo, 'commit', '-s', '-m', MIRROR_MSG + sha) + git(repo, 'push', 'origin', BRANCH) + + +if __name__ == "__main__": + workspace = check_output(['bazel', 'info', 'workspace']).decode().strip() + output = os.path.join(workspace, OUTPUT_BASE) + generateProtobufs(output) + repo = os.path.join(workspace, REPO_BASE) + cloneGoProtobufs(repo) + last_sha = findLastSyncSHA(repo) + changes = updatedSinceSHA(repo, last_sha) + if changes: + print('Changes detected: %s' % changes) + syncGoProtobufs(output, repo) + publishGoProtobufs(repo, changes[0]) diff --git a/tools/api_proto_plugin/annotations.py b/tools/api_proto_plugin/annotations.py index eadd080b03aa..b974b5f98019 100644 --- a/tools/api_proto_plugin/annotations.py +++ b/tools/api_proto_plugin/annotations.py @@ -28,14 +28,10 @@ # proto compatibility status. PROTO_STATUS_ANNOTATION = 'proto-status' -# Where v2 differs from v1.. 
-V2_API_DIFF_ANNOTATION = 'v2-api-diff' - VALID_ANNOTATIONS = set([ DOC_TITLE_ANNOTATION, NOT_IMPLEMENTED_WARN_ANNOTATION, NOT_IMPLEMENTED_HIDE_ANNOTATION, - V2_API_DIFF_ANNOTATION, NEXT_MAJOR_VERSION_ANNOTATION, COMMENT_ANNOTATION, PROTO_STATUS_ANNOTATION, diff --git a/tools/api_proto_plugin/plugin.py b/tools/api_proto_plugin/plugin.py index 0a56ea8e3f8b..66223f9e1566 100644 --- a/tools/api_proto_plugin/plugin.py +++ b/tools/api_proto_plugin/plugin.py @@ -54,4 +54,9 @@ def Plugin(output_suffix, visitor): stats_file.name = file_proto.name + output_suffix + '.profile' ps.print_stats() stats_file.content = stats_stream.getvalue() + # Also include the original FileDescriptorProto as text proto, this is + # useful when debugging. + descriptor_file = response.file.add() + descriptor_file.name = file_proto.name + ".descriptor.proto" + descriptor_file.content = str(file_proto) sys.stdout.buffer.write(response.SerializeToString()) diff --git a/tools/api_proto_plugin/traverse.py b/tools/api_proto_plugin/traverse.py index 6ad97b8699aa..57c08f664b01 100644 --- a/tools/api_proto_plugin/traverse.py +++ b/tools/api_proto_plugin/traverse.py @@ -3,6 +3,20 @@ from tools.api_proto_plugin import type_context +def TraverseService(type_context, service_proto, visitor): + """Traverse an enum definition. + + Args: + type_context: type_context.TypeContext for service type. + service_proto: ServiceDescriptorProto for service. + visitor: visitor.Visitor defining the business logic of the plugin. + + Returns: + Plugin specific output. + """ + return visitor.VisitService(service_proto, type_context) + + def TraverseEnum(type_context, enum_proto, visitor): """Traverse an enum definition. 
@@ -61,6 +75,10 @@ def TraverseFile(file_proto, visitor): """ source_code_info = type_context.SourceCodeInfo(file_proto.name, file_proto.source_code_info) package_type_context = type_context.TypeContext(source_code_info, file_proto.package) + services = [ + TraverseService(package_type_context.ExtendService(index, service.name), service, visitor) + for index, service in enumerate(file_proto.service) + ] msgs = [ TraverseMessage(package_type_context.ExtendMessage(index, msg.name), msg, visitor) for index, msg in enumerate(file_proto.message_type) @@ -69,4 +87,4 @@ def TraverseFile(file_proto, visitor): TraverseEnum(package_type_context.ExtendEnum(index, enum.name), enum, visitor) for index, enum in enumerate(file_proto.enum_type) ] - return visitor.VisitFile(file_proto, package_type_context, msgs, enums) + return visitor.VisitFile(file_proto, package_type_context, services, msgs, enums) diff --git a/tools/api_proto_plugin/type_context.py b/tools/api_proto_plugin/type_context.py index d69c120a1a63..e88f02404193 100644 --- a/tools/api_proto_plugin/type_context.py +++ b/tools/api_proto_plugin/type_context.py @@ -77,6 +77,36 @@ def LeadingCommentPathLookup(self, path): annotations.ExtractAnnotations(location.leading_comments, self.file_level_annotations)) return Comment('', {}) + def LeadingDetachedCommentsPathLookup(self, path): + """Lookup leading detached comments by path in SourceCodeInfo. + + Args: + path: a list of path indexes as per + https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. + + Returns: + List of detached comment strings. + """ + location = self.LocationPathLookup(path) + if location is not None and location.leading_detached_comments != self.file_level_comments: + return location.leading_detached_comments + return [] + + def TrailingCommentPathLookup(self, path): + """Lookup trailing comment by path in SourceCodeInfo. 
+ + Args: + path: a list of path indexes as per + https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. + + Returns: + Raw detached comment string + """ + location = self.LocationPathLookup(path) + if location is not None: + return location.trailing_comments + return '' + class TypeContext(object): """Contextual information for a message/field. @@ -157,6 +187,15 @@ def ExtendEnum(self, index, name): """ return self._Extend([5, index], 'enum', name) + def ExtendService(self, index, name): + """Extend type context with a service. + + Args: + index: service index in file. + name: service name. + """ + return self._Extend([6, index], 'service', name) + def ExtendNestedEnum(self, index, name): """Extend type context with a nested enum. @@ -184,6 +223,15 @@ def ExtendOneof(self, index, name): """ return self._Extend([8, index], 'oneof', name) + def ExtendMethod(self, index, name): + """Extend type context with a service method declaration. + + Args: + index: method index in service. + name: method name. 
+ """ + return self._Extend([2, index], 'method', name) + @property def location(self): """SourceCodeInfo.Location for type context.""" @@ -193,3 +241,13 @@ def location(self): def leading_comment(self): """Leading comment for type context.""" return self.source_code_info.LeadingCommentPathLookup(self.path) + + @property + def leading_detached_comments(self): + """Leading detached comments for type context.""" + return self.source_code_info.LeadingDetachedCommentsPathLookup(self.path) + + @property + def trailing_comment(self): + """Trailing comment for type context.""" + return self.source_code_info.TrailingCommentPathLookup(self.path) diff --git a/tools/api_proto_plugin/visitor.py b/tools/api_proto_plugin/visitor.py index 0065537f0a6e..1dfd361fdba5 100644 --- a/tools/api_proto_plugin/visitor.py +++ b/tools/api_proto_plugin/visitor.py @@ -4,6 +4,18 @@ class Visitor(object): """Abstract visitor interface for api_proto_plugin implementation.""" + def VisitService(self, service_proto, type_context): + """Visit a service definition. + + Args: + service_proto: ServiceDescriptorProto for service. + type_context: type_context.TypeContext for service type. + + Returns: + Plugin specific output. + """ + pass + def VisitEnum(self, enum_proto, type_context): """Visit an enum definition. @@ -30,12 +42,13 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): """ pass - def VisitFile(self, file_proto, type_context, msgs, enums): + def VisitFile(self, file_proto, type_context, services, msgs, enums): """Visit a proto file definition. Args: file_proto: FileDescriptorProto for file. type_context: type_context.TypeContext for file. + services: a list of results from visiting services. msgs: a list of results from visiting messages. enums: a list of results from visiting enums. 
diff --git a/tools/check_format.py b/tools/check_format.py index 21bf410ae5fd..3960612a671b 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -43,8 +43,9 @@ "./test/integration/integration.h") # Files in these paths can use MessageLite::SerializeAsString -SERIALIZE_AS_STRING_WHITELIST = ("./test/common/protobuf/utility_test.cc", - "./test/common/grpc/codec_test.cc") +SERIALIZE_AS_STRING_WHITELIST = ( + "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc", + "./test/common/protobuf/utility_test.cc", "./test/common/grpc/codec_test.cc") # Files in these paths can use Protobuf::util::JsonStringToMessage JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc") @@ -59,6 +60,9 @@ "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/admin.h", "./source/server/http/admin.cc") +# Only one C++ file should instantiate grpc_init +GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc") + CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-8") BUILDIFIER_PATH = os.getenv("BUILDIFIER_BIN", "$GOPATH/bin/buildifier") ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), @@ -69,10 +73,6 @@ INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE) PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE) X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*') -PROTO_OPTION_JAVA_PACKAGE = "option java_package = \"" -PROTO_OPTION_JAVA_OUTER_CLASSNAME = "option java_outer_classname = \"" -PROTO_OPTION_JAVA_MULTIPLE_FILES = "option java_multiple_files = " -PROTO_OPTION_GO_PACKAGE = "option go_package = \"" # yapf: disable PROTOBUF_TYPE_ERRORS = { @@ -249,21 +249,6 @@ def checkNamespace(file_path): return [] -# If the substring is not found in the file, then insert to_add -def insertProtoOptionIfNotFound(substring, file_path, to_add): - text = None - with open(file_path) as f: - text = f.read() - - if not substring in text: - - def repl(m): - 
return m.group(0).rstrip() + "\n\n" + to_add + "\n" - - with open(file_path, "w") as f: - f.write(re.sub(PROTO_PACKAGE_REGEX, repl, text)) - - def packageNameForProto(file_path): package_name = None error_message = [] @@ -277,32 +262,6 @@ def packageNameForProto(file_path): return [package_name, error_message] -def fixJavaPackageProtoOption(file_path): - package_name = packageNameForProto(file_path)[0] - to_add = PROTO_OPTION_JAVA_PACKAGE + "io.envoyproxy.{}\";".format(package_name) - insertProtoOptionIfNotFound("\n" + PROTO_OPTION_JAVA_PACKAGE, file_path, to_add) - return [] - - -# Add "option java_outer_classname = FooBarProto;" for foo_bar.proto -def fixJavaOuterClassnameProtoOption(file_path): - file_name = os.path.basename(file_path)[:-len(".proto")] - if "-" in file_name or "." in file_name or not file_name.islower(): - return ["Unable to decide java_outer_classname for proto file: %s" % file_path] - - to_add = PROTO_OPTION_JAVA_OUTER_CLASSNAME \ - + "".join(x.title() for x in file_name.split("_")) \ - + "Proto\";" - insertProtoOptionIfNotFound("\n" + PROTO_OPTION_JAVA_OUTER_CLASSNAME, file_path, to_add) - return [] - - -def fixJavaMultipleFilesProtoOption(file_path): - to_add = PROTO_OPTION_JAVA_MULTIPLE_FILES + "true;" - insertProtoOptionIfNotFound("\n" + PROTO_OPTION_JAVA_MULTIPLE_FILES, file_path, to_add) - return [] - - # To avoid breaking the Lyft import, we just check for path inclusion here. 
def whitelistedForProtobufDeps(file_path): return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ @@ -330,6 +289,10 @@ def whitelistedForStdRegex(file_path): return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST +def whitelistedForGrpcInit(file_path): + return file_path in GRPC_INIT_WHITELIST + + def findSubstringAndReturnError(pattern, file_path, error_message): with open(file_path) as f: text = f.read() @@ -583,6 +546,18 @@ def checkSourceLine(line, file_path, reportError): if not whitelistedForStdRegex(file_path) and "std::regex" in line: reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher") + if not whitelistedForGrpcInit(file_path): + grpc_init_or_shutdown = line.find("grpc_init()") + grpc_shutdown = line.find("grpc_shutdown()") + if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and + grpc_shutdown < grpc_init_or_shutdown): + grpc_init_or_shutdown = grpc_shutdown + if grpc_init_or_shutdown != -1: + comment = line.find("// ") + if comment == -1 or comment > grpc_init_or_shutdown: + reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + + "Grpc::GoogleGrpcContext. 
See #8282") + def checkBuildLine(line, file_path, reportError): if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/")): @@ -657,10 +632,6 @@ def fixSourcePath(file_path): package_name, error_message = packageNameForProto(file_path) if package_name is None: error_messages += error_message - else: - error_messages += fixJavaMultipleFilesProtoOption(file_path) - error_messages += fixJavaOuterClassnameProtoOption(file_path) - error_messages += fixJavaPackageProtoOption(file_path) return error_messages @@ -680,16 +651,6 @@ def checkSourcePath(file_path): package_name, error_message = packageNameForProto(file_path) if package_name is None: error_messages += error_message - else: - error_messages += errorIfNoSubstringFound("\n" + PROTO_OPTION_JAVA_PACKAGE, file_path, - "Java proto option 'java_package' not set") - error_messages += errorIfNoSubstringFound("\n" + PROTO_OPTION_JAVA_OUTER_CLASSNAME, file_path, - "Java proto option 'java_outer_classname' not set") - error_messages += errorIfNoSubstringFound("\n" + PROTO_OPTION_JAVA_MULTIPLE_FILES, file_path, - "Java proto option 'java_multiple_files' not set") - with open(file_path) as f: - if PROTO_OPTION_GO_PACKAGE in f.read(): - error_messages += ["go_package option should not be set in %s" % file_path] return error_messages diff --git a/tools/check_format_test_helper.py b/tools/check_format_test_helper.py index 844488a8aa35..c065a6716979 100755 --- a/tools/check_format_test_helper.py +++ b/tools/check_format_test_helper.py @@ -219,6 +219,14 @@ def checkFileExpectingOK(filename): "Don't lookup stats by name at runtime; use StatName saved during construction") errors += checkUnfixableError( "regex.cc", "Don't use std::regex in code that handles untrusted input. Use RegexMatcher") + errors += checkUnfixableError( + "grpc_init.cc", + "Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. 
" + + "See #8282") + errors += checkUnfixableError( + "grpc_shutdown.cc", + "Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. " + + "See #8282") errors += fixFileExpectingFailure( "api/missing_package.proto", @@ -237,8 +245,6 @@ def checkFileExpectingOK(filename): errors += checkAndFixError("license.BUILD", "envoy_build_fixer check failed") errors += checkAndFixError("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix") errors += checkAndFixError("proto_format.proto", "clang-format check failed") - errors += checkAndFixError("api/java_options.proto", "Java proto option") - errors += checkFileExpectingError("api/go_package.proto", "go_package option should not be set") errors += checkAndFixError( "cpp_std.cc", "term absl::make_unique< should be replaced with standard library term std::make_unique<") diff --git a/tools/proto_format.sh b/tools/proto_format.sh new file mode 100755 index 000000000000..e2237beff04d --- /dev/null +++ b/tools/proto_format.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Reformat API protos to canonical proto style using protoxform. + +set -e + +# Clean up any stale files in the API tree output. Bazel remembers valid cached +# files still. +rm -rf bazel-bin/external/envoy_api + +# TODO(htuch): This script started life by cloning docs/build.sh. It depends on +# the @envoy_api//docs:protos target in a few places as a result. This is not +# the precise set of protos we want to format, but as a starting place it seems +# reasonable. In the future, we should change the logic here. +bazel build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ + tools/protoxform/protoxform.bzl%proto_xform_aspect --output_groups=proto --action_env=CPROFILE_ENABLED=1 \ + --spawn_strategy=standalone --host_force_python=PY3 + +# We do ** matching below to deal with Bazel cache blah (source proto artifacts +# are nested inside source package targets). +shopt -s globstar + +# Find all source protos. 
+declare -r PROTO_TARGET=$(bazel query "labels(srcs, labels(deps, @envoy_api//docs:protos))") + +# Copy protos from Bazel build-cache back into source tree. +for p in ${PROTO_TARGET} +do + declare PROTO_FILE_WITHOUT_PREFIX="${p#@envoy_api//}" + declare PROTO_FILE_CANONICAL="${PROTO_FILE_WITHOUT_PREFIX/://}" + # We use ** glob matching here to deal with the fact that we have something + # like + # bazel-bin/external/envoy_api/envoy/admin/v2alpha/pkg/envoy/admin/v2alpha/certs.proto.proto + # and we don't want to have to do a nested loop and slow bazel query to + # recover the canonical package part of the path. + declare SRCS=(bazel-bin/external/envoy_api/**/"${PROTO_FILE_CANONICAL}.proto") + # While we may have reformatted the file multiple times due to the transitive + # dependencies in the aspect above, they all look the same. So, just pick an + # arbitrary match and we're done. + declare SRC="${SRCS[0]}" + declare DST="api/${PROTO_FILE_CANONICAL}" + + if [[ "$1" == "fix" ]] + then + [[ -f "${DST}" ]] + cp -f "${SRC}" "${DST}" + else + diff ${SRC} "${DST}" || \ + (echo "$0 mismatch, either run ./ci/do_ci.sh fix_format or $0 fix to reformat."; exit 1) + fi +done diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index e386757a5228..626bfc896b19 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -72,9 +72,6 @@ def FormatCommentWithAnnotations(comment, type_name=''): s = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') if annotations.NOT_IMPLEMENTED_WARN_ANNOTATION in comment.annotations: s += '\n.. WARNING::\n Not implemented yet\n' - if annotations.V2_API_DIFF_ANNOTATION in comment.annotations: - s += '\n.. 
NOTE::\n **v2 API difference**: ' + comment.annotations[ - annotations.V2_API_DIFF_ANNOTATION] + '\n' if type_name == 'message' or type_name == 'enum': if annotations.PROTO_STATUS_ANNOTATION in comment.annotations: status = comment.annotations[annotations.PROTO_STATUS_ANNOTATION] @@ -493,7 +490,7 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) - def VisitFile(self, file_proto, type_context, msgs, enums): + def VisitFile(self, file_proto, type_context, services, msgs, enums): # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD new file mode 100644 index 000000000000..780ad11d4fd2 --- /dev/null +++ b/tools/protoxform/BUILD @@ -0,0 +1,14 @@ +licenses(["notice"]) # Apache 2 + +py_binary( + name = "protoxform", + srcs = ["protoxform.py"], + python_version = "PY3", + visibility = ["//visibility:public"], + deps = [ + "//tools/api_proto_plugin", + "@com_envoyproxy_protoc_gen_validate//validate:validate_py", + "@com_google_googleapis//google/api:annotations_py_proto", + "@com_google_protobuf//:protobuf_python", + ], +) diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl new file mode 100644 index 000000000000..e20650ddfb72 --- /dev/null +++ b/tools/protoxform/protoxform.bzl @@ -0,0 +1,95 @@ +# TODO(htuch): this is a clone+modify from //tools/protodoc:protodoc.bzl. +# Factor out common parts for this kind of API protoc aspect. + +# Borrowed from +# https://github.com/bazelbuild/rules_go/blob/master/proto/toolchain.bzl. This +# does some magic munging to remove workspace prefixes from output paths to +# convert path as understood by Bazel into paths as understood by protoc. 
+def _proto_path(proto): + """ + The proto path is not really a file path + It's the path to the proto that was seen when the descriptor file was generated. + """ + path = proto.path + root = proto.root.path + ws = proto.owner.workspace_root + if path.startswith(root): + path = path[len(root):] + if path.startswith("/"): + path = path[1:] + if path.startswith(ws): + path = path[len(ws):] + if path.startswith("/"): + path = path[1:] + return path + +# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# that can be invoked from the CLI to perform API transforms via //tools/protoxform for +# proto_library targets. Example use: +# +# bazel build //api --aspects tools/protoxform/protoxform.bzl%proto_xform_aspect \ +# --output_groups=proto +def _proto_xform_aspect_impl(target, ctx): + # Compute .proto files from the current proto_library node's dependencies. + transitive_outputs = depset(transitive = [dep.output_groups["proto"] for dep in ctx.rule.attr.deps]) + proto_sources = target[ProtoInfo].direct_sources + + # If this proto_library doesn't actually name any sources, e.g. //api:api, + # but just glues together other libs, we just need to follow the graph. + if not proto_sources: + return [OutputGroupInfo(proto = transitive_outputs)] + + # Figure out the set of import paths. Ideally we would use descriptor sets + # built by proto_library, which avoid having to do nasty path mangling, but + # these don't include source_code_info, which we need for comment + # extractions. See https://github.com/bazelbuild/bazel/issues/3971. + import_paths = [] + for f in target[ProtoInfo].transitive_sources.to_list(): + if f.root.path: + import_path = f.root.path + "/" + f.owner.workspace_root + else: + import_path = f.owner.workspace_root + if import_path: + import_paths += [import_path] + + # The outputs live in the ctx.label's package root. We add some additional + # path information to match with protoc's notion of path relative locations. 
+ outputs = [ctx.actions.declare_file(ctx.label.name + "/" + _proto_path(f) + + ".proto") for f in proto_sources] + + # Create the protoc command-line args. + ctx_path = ctx.label.package + "/" + ctx.label.name + output_path = outputs[0].root.path + "/" + outputs[0].owner.workspace_root + "/" + ctx_path + args = ["-I./" + ctx.label.workspace_root] + args += ["-I" + import_path for import_path in import_paths] + args += ["--plugin=protoc-gen-protoxform=" + ctx.executable._protoxform.path, "--protoxform_out=" + output_path] + args += [_proto_path(src) for src in target[ProtoInfo].direct_sources] + ctx.actions.run( + executable = ctx.executable._protoc, + arguments = args, + inputs = target[ProtoInfo].transitive_sources, + tools = [ctx.executable._protoxform], + outputs = outputs, + mnemonic = "protoxform", + use_default_shell_env = True, + ) + + transitive_outputs = depset(outputs, transitive = [transitive_outputs]) + return [OutputGroupInfo(proto = transitive_outputs)] + +proto_xform_aspect = aspect( + attr_aspects = ["deps"], + attrs = { + "_protoc": attr.label( + default = Label("@com_google_protobuf//:protoc"), + executable = True, + cfg = "exec", + ), + "_protoxform": attr.label( + default = Label("//tools/protoxform"), + executable = True, + cfg = "exec", + ), + }, + implementation = _proto_xform_aspect_impl, +) diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py new file mode 100755 index 000000000000..6291e311eb52 --- /dev/null +++ b/tools/protoxform/protoxform.py @@ -0,0 +1,423 @@ +# protoc plugin to map from FileDescriptorProtos to a canonicaly formatted +# proto. +# +# protoxform is currently only a formatting tool, but will act as the basis for +# vN -> v(N+1) API migration tooling, allowing for things like deprecated field +# removal, package renaming, field movement, providing both .proto and .cc code +# generation to support automation of Envoy API version translation. 
+# +# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto +# for the underlying protos mentioned in this file. See + +from collections import deque +import functools +import os +import re +import subprocess + +from tools.api_proto_plugin import plugin +from tools.api_proto_plugin import visitor + +from google.api import annotations_pb2 +from google.protobuf import text_format +from validate import validate_pb2 + +CLANG_FORMAT_STYLE = ('{ColumnLimit: 100, SpacesInContainerLiterals: false, ' + 'AllowShortFunctionsOnASingleLine: false}') + + +class ProtoXformError(Exception): + """Base error class for the protoxform module.""" + + +def ClangFormat(contents): + """Run proto-style oriented clang-format over given string. + + Args: + contents: a string with proto contents. + + Returns: + clang-formatted string + """ + return subprocess.run( + ['clang-format', + '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'], + input=contents.encode('utf-8'), + stdout=subprocess.PIPE).stdout + + +def FormatBlock(block): + """Append \n to a .proto section (e.g. + + comment, message definition, etc.) if non-empty. + + Args: + block: a string representing the section. + + Returns: + A string with appropriate whitespace. + """ + if block.strip(): + return block + '\n' + return '' + + +def FormatComments(comments): + """Format a list of comment blocks from SourceCodeInfo. + + Prefixes // to each line, separates blocks by spaces. + + Args: + comments: a list of blocks, each block is a list of strings representing + lines in each block. + + Returns: + A string reprenting the formatted comment blocks. 
+ """ + + # TODO(htuch): not sure why this is needed, but clang-format does some weird + # stuff with // comment indents when we have these trailing \ + def FixupTrailingBackslash(s): + return s[:-1].rstrip() if s.endswith('\\') else s + + comments = '\n\n'.join( + '\n'.join(['//%s' % FixupTrailingBackslash(line) + for line in comment.split('\n')[:-1]]) + for comment in comments) + return FormatBlock(comments) + + +def FormatTypeContextComments(type_context): + """Format the leading/trailing comments in a given TypeContext. + + Args: + type_context: contextual information for message/enum/field. + + Returns: + Tuple of formatted leading and trailing comment blocks. + """ + leading = FormatComments( + list(type_context.leading_detached_comments) + [type_context.leading_comment.raw]) + trailing = FormatBlock(FormatComments([type_context.trailing_comment])) + return leading, trailing + + +def FormatHeaderFromFile(source_code_info, file_proto): + """Format proto header. + + Args: + source_code_info: SourceCodeInfo object. + file_proto: FileDescriptorProto for file. + + Returns: + Formatted proto header as a string. + """ + + def CamelCase(s): + return ''.join(t.capitalize() for t in re.split('[\._]', s)) + + package_line = 'package %s;\n' % file_proto.package + file_block = '\n'.join(['syntax = "proto3";\n', package_line]) + + options = [ + 'option java_outer_classname = "%s";' % CamelCase(os.path.basename(file_proto.name)), + 'option java_multiple_files = true;', + 'option java_package = "io.envoyproxy.%s";' % file_proto.package, + ] + # This is a workaround for C#/Ruby namespace conflicts between packages and + # objects, see https://github.com/envoyproxy/envoy/pull/3854. + # TODO(htuch): remove once v3 fixes this naming issue in + # https://github.com/envoyproxy/envoy/issues/8120. 
+ if file_proto.package in ['envoy.api.v2.listener', 'envoy.api.v2.cluster']: + qualified_package = '.'.join(s.capitalize() for s in file_proto.package.split('.')) + 'NS' + options += [ + 'option csharp_namespace = "%s";' % qualified_package, + 'option ruby_package = "%s";' % qualified_package, + ] + if file_proto.service: + options += ['option java_generic_services = true;'] + options_block = FormatBlock('\n'.join(options)) + + envoy_imports = [] + google_imports = [] + infra_imports = [] + misc_imports = [] + + for d in file_proto.dependency: + if d.startswith('envoy/'): + envoy_imports.append(d) + elif d.startswith('google/'): + google_imports.append(d) + elif d.startswith('validate/'): + infra_imports.append(d) + else: + misc_imports.append(d) + + def FormatImportBlock(xs): + if not xs: + return '' + return FormatBlock('\n'.join(sorted('import "%s";' % x for x in xs))) + + import_block = '\n'.join( + map(FormatImportBlock, [envoy_imports, google_imports, misc_imports, infra_imports])) + comment_block = FormatComments(source_code_info.file_level_comments) + + return ''.join(map(FormatBlock, [file_block, options_block, import_block, comment_block])) + + +def NormalizeFieldTypeName(type_context, field_fqn): + """Normalize a fully qualified field type name, e.g. + + .envoy.foo.bar is normalized to foo.bar. + + Considers type context to minimize type prefix. + + Args: + field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. + type_context: contextual information for message/enum/field. + + Returns: + Normalized type name as a string. + """ + if field_fqn.startswith('.'): + # Let's say we have type context namespace a.b.c.d.e and the type we're + # trying to normalize is a.b.d.e. We take (from the end) on package fragment + # at a time, and apply the inner-most evaluation that protoc performs to see + # if we evaluate to the fully qualified type. If so, we're done. 
It's not + # sufficient to compute common prefix and drop that, since in the above + # example the normalized type name would be d.e, which proto resolves inner + # most as a.b.c.d.e (bad) instead of the intended a.b.d.e. + field_fqn_splits = field_fqn[1:].split('.') + type_context_splits = type_context.name.split('.')[:-1] + remaining_field_fqn_splits = deque(field_fqn_splits[:-1]) + normalized_splits = deque([field_fqn_splits[-1]]) + + def EquivalentInTypeContext(splits): + type_context_splits_tmp = deque(type_context_splits) + while type_context_splits_tmp: + # If we're in a.b.c and the FQN is a.d.Foo, we want to return true once + # we have type_context_splits_tmp as [a] and splits as [d, Foo]. + if list(type_context_splits_tmp) + list(splits) == field_fqn_splits: + return True + # If we're in a.b.c.d.e.f and the FQN is a.b.d.e.Foo, we want to return True + # once we have type_context_splits_tmp as [a] and splits as [b, d, e, Foo], but + # not when type_context_splits_tmp is [a, b, c] and FQN is [d, e, Foo]. + if len(splits) > 1 and '.'.join(type_context_splits_tmp).endswith('.'.join( + list(splits)[:-1])): + return False + type_context_splits_tmp.pop() + return False + + while remaining_field_fqn_splits and not EquivalentInTypeContext(normalized_splits): + normalized_splits.appendleft(remaining_field_fqn_splits.pop()) + + return '.'.join(normalized_splits) + return field_fqn + + +def TypeNameFromFQN(fqn): + return fqn[1:] + + +def FormatFieldType(type_context, field): + """Format a FieldDescriptorProto type description. + + Args: + type_context: contextual information for message/enum/field. + field: FieldDescriptor proto. + + Returns: + Formatted proto field type as string. 
+ """ + label = 'repeated ' if field.label == field.LABEL_REPEATED else '' + type_name = label + NormalizeFieldTypeName(type_context, field.type_name) + + if field.type == field.TYPE_MESSAGE: + if type_context.map_typenames and TypeNameFromFQN( + field.type_name) in type_context.map_typenames: + return 'map<%s, %s>' % tuple( + map(functools.partial(FormatFieldType, type_context), + type_context.map_typenames[TypeNameFromFQN(field.type_name)])) + return type_name + elif field.type_name: + return type_name + + pretty_type_names = { + field.TYPE_DOUBLE: 'double', + field.TYPE_FLOAT: 'float', + field.TYPE_INT32: 'int32', + field.TYPE_SFIXED32: 'int32', + field.TYPE_SINT32: 'int32', + field.TYPE_FIXED32: 'uint32', + field.TYPE_UINT32: 'uint32', + field.TYPE_INT64: 'int64', + field.TYPE_SFIXED64: 'int64', + field.TYPE_SINT64: 'int64', + field.TYPE_FIXED64: 'uint64', + field.TYPE_UINT64: 'uint64', + field.TYPE_BOOL: 'bool', + field.TYPE_STRING: 'string', + field.TYPE_BYTES: 'bytes', + } + if field.type in pretty_type_names: + return label + pretty_type_names[field.type] + raise ProtoXformError('Unknown field type ' + str(field.type)) + + +def FormatServiceMethod(type_context, method): + """Format a service MethodDescriptorProto. + + Args: + type_context: contextual information for method. + method: MethodDescriptorProto proto. + + Returns: + Formatted service method as string. 
+ """ + + def FormatStreaming(s): + return 'stream ' if s else '' + + def FormatHttpOptions(options): + if options.HasExtension(annotations_pb2.http): + http_options = options.Extensions[annotations_pb2.http] + return 'option (google.api.http) = { %s };' % str(http_options) + return '' + + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % ( + leading_comment, method.name, trailing_comment, FormatStreaming( + method.client_streaming), NormalizeFieldTypeName( + type_context, method.input_type), FormatStreaming(method.server_streaming), + NormalizeFieldTypeName(type_context, method.output_type), FormatHttpOptions(method.options)) + + +def FormatValidateFieldRules(rules): + """Format validate_pb2 rules. + + Args: + rules: validate_pb2 rules proto. + + Returns: + Formatted validation rules as string, suitable for proto field annotation. + """ + return ' '.join('.%s = { %s }' % + (field.name, text_format.MessageToString(value, as_one_line=True)) + for field, value in rules.ListFields()) + + +def FormatField(type_context, field): + """Format FieldDescriptorProto as a proto field. + + Args: + type_context: contextual information for message/enum/field. + field: FieldDescriptor proto. + + Returns: + Formatted proto field as a string. 
+ """ + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + annotations = [] + if field.options.HasExtension(validate_pb2.rules): + rules = field.options.Extensions[validate_pb2.rules] + annotations.append('(validate.rules) %s' % FormatValidateFieldRules(rules)) + if field.options.deprecated: + annotations.append('deprecated = true') + formatted_annotations = '[ %s]' % ','.join(annotations) if annotations else '' + return '%s%s %s = %d%s;\n%s' % (leading_comment, FormatFieldType( + type_context, field), field.name, field.number, formatted_annotations, trailing_comment) + + +def FormatEnumValue(type_context, value): + """Format a EnumValueDescriptorProto as a proto enum value. + + Args: + type_context: contextual information for message/enum/field. + value: EnumValueDescriptorProto. + + Returns: + Formatted proto enum value as a string. + """ + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + annotations = [] + if value.options.deprecated: + annotations.append('deprecated = true') + formatted_annotations = '[ %s]' % ','.join(annotations) if annotations else '' + return '%s%s = %d%s;\n%s' % (leading_comment, value.name, value.number, formatted_annotations, + trailing_comment) + + +class ProtoFormatVisitor(visitor.Visitor): + """Visitor to generate a proto representation from a FileDescriptor proto. + + See visitor.Visitor for visitor method docs comments. 
+ """ + + def VisitService(self, service_proto, type_context): + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + methods = '\n'.join( + FormatServiceMethod(type_context.ExtendMethod(index, m.name), m) + for index, m in enumerate(service_proto.method)) + return '%sservice %s {\n%s%s\n}\n' % (leading_comment, service_proto.name, trailing_comment, + methods) + + def VisitEnum(self, enum_proto, type_context): + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + values = [ + FormatEnumValue(type_context.ExtendField(index, value.name), value) + for index, value in enumerate(enum_proto.value) + ] + joined_values = ('\n' if any('//' in v for v in values) else '').join(values) + return '%senum %s {\n%s%s\n}\n' % (leading_comment, enum_proto.name, trailing_comment, + joined_values) + + def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): + leading_comment, trailing_comment = FormatTypeContextComments(type_context) + formatted_enums = FormatBlock('\n'.join(nested_enums)) + formatted_msgs = FormatBlock('\n'.join(nested_msgs)) + # Reserved fields. + reserved_fields = FormatBlock('reserved %s;\n' % ','.join( + map(str, sum([list(range(rr.start, rr.end)) for rr in msg_proto.reserved_range], + [])))) if msg_proto.reserved_range else '' + # Recover the oneof structure. This needs some extra work, since + # DescriptorProto just gives use fields and a oneof_index that can allow + # recovery of the original oneof placement. 
+ fields = '' + oneof_index = None + for index, field in enumerate(msg_proto.field): + if oneof_index is not None: + if not field.HasField('oneof_index') or field.oneof_index != oneof_index: + fields += '}\n\n' + oneof_index = None + if oneof_index is None and field.HasField('oneof_index'): + oneof_index = field.oneof_index + oneof_proto = msg_proto.oneof_decl[oneof_index] + if oneof_proto.options.HasExtension(validate_pb2.required): + oneof_options = 'option (validate.required) = true;\n\n' + else: + oneof_options = '' + oneof_leading_comment, oneof_trailing_comment = FormatTypeContextComments( + type_context.ExtendOneof(oneof_index, field.name)) + fields += '%soneof %s {\n%s%s' % (oneof_leading_comment, oneof_proto.name, + oneof_trailing_comment, oneof_options) + fields += FormatBlock(FormatField(type_context.ExtendField(index, field.name), field)) + if oneof_index is not None: + fields += '}\n\n' + return '%smessage %s {\n%s%s%s%s%s\n}\n' % (leading_comment, msg_proto.name, trailing_comment, + formatted_enums, formatted_msgs, reserved_fields, + fields) + + def VisitFile(self, file_proto, type_context, services, msgs, enums): + header = FormatHeaderFromFile(type_context.source_code_info, file_proto) + formatted_services = FormatBlock('\n'.join(services)) + formatted_enums = FormatBlock('\n'.join(enums)) + formatted_msgs = FormatBlock('\n'.join(msgs)) + return ClangFormat(header + formatted_services + formatted_enums + formatted_msgs) + + +def Main(): + plugin.Plugin('.proto', ProtoFormatVisitor()) + + +if __name__ == '__main__': + Main() diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 0cd9883bd93b..0b49c5a9af85 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -154,6 +154,7 @@ LDS LEV LHS LF +LRS MB MD MGET @@ -427,6 +428,7 @@ decls dedup dedupe deduplicate +deduplicates deflateInit deletable deleter diff --git a/tools/testdata/check_format/api/go_package.proto 
b/tools/testdata/check_format/api/go_package.proto deleted file mode 100644 index b32347b6e46f..000000000000 --- a/tools/testdata/check_format/api/go_package.proto +++ /dev/null @@ -1,5 +0,0 @@ -option go_package = "foo"; -option java_package = "io.envoyproxy.envoy.foo"; -option java_outer_classname = "JavaOptionsProto"; -option java_multiple_files = true; -package envoy.foo; diff --git a/tools/testdata/check_format/api/java_options.proto b/tools/testdata/check_format/api/java_options.proto deleted file mode 100644 index a979cc6d2374..000000000000 --- a/tools/testdata/check_format/api/java_options.proto +++ /dev/null @@ -1 +0,0 @@ -package envoy.foo; diff --git a/tools/testdata/check_format/api/java_options.proto.gold b/tools/testdata/check_format/api/java_options.proto.gold deleted file mode 100644 index 54513d2a81fe..000000000000 --- a/tools/testdata/check_format/api/java_options.proto.gold +++ /dev/null @@ -1,5 +0,0 @@ -package envoy.foo; - -option java_package = "io.envoyproxy.envoy.foo"; -option java_outer_classname = "JavaOptionsProto"; -option java_multiple_files = true; diff --git a/tools/testdata/check_format/grpc_init.cc b/tools/testdata/check_format/grpc_init.cc new file mode 100644 index 000000000000..5f4d96fe8ca2 --- /dev/null +++ b/tools/testdata/check_format/grpc_init.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +void foo() { + grpc_init(); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/grpc_shutdown.cc b/tools/testdata/check_format/grpc_shutdown.cc new file mode 100644 index 000000000000..ff25bad98a25 --- /dev/null +++ b/tools/testdata/check_format/grpc_shutdown.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +void foo() { + grpc_shutdown(); +} + +} // namespace Envoy