Merge remote-tracking branch 'upstream/master' into fix/rbac_dns_san
Signed-off-by: Rama Chavali <rama.rao@salesforce.com>
ramaraochavali committed Aug 14, 2019
2 parents a78a6fe + 3dd84d3 commit 25a4637
Showing 182 changed files with 5,674 additions and 1,274 deletions.
17 changes: 16 additions & 1 deletion .circleci/config.yml
@@ -86,11 +86,24 @@ jobs:
          command:
            ci/do_circle_ci.sh bazel.coverage
          no_output_timeout: 60m
-      - run: ci/coverage_publish.sh
      - persist_to_workspace:
          root: /build/envoy/generated
          paths:
            - coverage
      - store_artifacts:
          path: /build/envoy/generated
          destination: /

  coverage_publish:
    docker:
      - image: google/cloud-sdk
    steps:
      - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken
      - checkout
      - attach_workspace:
          at: /build/envoy/generated
      - run: ci/coverage_publish.sh

  clang_tidy:
    executor: ubuntu-build
    steps:
@@ -150,6 +163,8 @@ workflows:
      - api
      - filter_example_mirror
      - coverage
      - coverage_publish:
          requires: [coverage]
      - format
      - clang_tidy
      - build_image
1 change: 0 additions & 1 deletion api/envoy/api/v2/cluster/filter.proto
@@ -9,7 +9,6 @@ option csharp_namespace = "Envoy.Api.V2.ClusterNS";
option ruby_package = "Envoy.Api.V2.ClusterNS";

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";

import "validate/validate.proto";
import "gogoproto/gogo.proto";
3 changes: 3 additions & 0 deletions api/envoy/api/v2/core/config_source.proto
@@ -65,6 +65,9 @@ message ApiConfigSource {
// For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be
// rate limited.
RateLimitSettings rate_limit_settings = 6;

// Skip the node identifier in subsequent discovery requests for streaming gRPC config types.
bool set_node_on_first_message_only = 7;
}

// Aggregated Discovery Service (ADS) options. This is currently empty, but when
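For context, a minimal bootstrap sketch (not part of this commit) showing where the new field would sit in an ADS configuration; the cluster name is a hypothetical placeholder:

```yaml
dynamic_resources:
  ads_config:
    api_type: GRPC
    # Send the node identifier only on the first DiscoveryRequest of a stream.
    set_node_on_first_message_only: true
    grpc_services:
      - envoy_grpc:
          cluster_name: xds_cluster  # hypothetical cluster name
```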
58 changes: 58 additions & 0 deletions api/envoy/api/v2/core/protocol.proto
@@ -49,6 +49,7 @@ message Http1ProtocolOptions {
string default_host_for_http_10 = 3;
}

// [#comment:next free field: 13]
message Http2ProtocolOptions {
// `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_
// (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
@@ -91,6 +92,63 @@ message Http2ProtocolOptions {
// docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more
// information.
bool allow_metadata = 6;

// Limit the number of pending outbound downstream frames of all types (frames that are waiting to
// be written into the socket). Exceeding this limit triggers flood mitigation and the connection
// is terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due
// to flood mitigation. The default limit is 10000.
// [#comment:TODO: implement same limits for upstream outbound frames as well.]
google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];

// Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,
// preventing high memory utilization when receiving a continuous stream of these frames. Exceeding
// this limit triggers flood mitigation and the connection is terminated. The
// ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood
// mitigation. The default limit is 1000.
// [#comment:TODO: implement same limits for upstream outbound frames as well.]
google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];

// Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an
// empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but
// might be a result of a broken HTTP/2 implementation. The ``http2.inbound_empty_frames_flood``
// stat tracks the number of connections terminated due to flood mitigation.
// Setting this to 0 will terminate the connection upon receiving the first frame with an empty
// payload and no end stream flag. The default limit is 1.
// [#comment:TODO: implement same limits for upstream inbound frames as well.]
google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;

// Limit the number of inbound PRIORITY frames allowed per opened stream. If the number
// of PRIORITY frames received over the lifetime of the connection exceeds the value calculated
// using this formula::
//
// max_inbound_priority_frames_per_stream * (1 + inbound_streams)
//
// the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks
// the number of connections terminated due to flood mitigation. The default limit is 100.
// [#comment:TODO: implement same limits for upstream inbound frames as well.]
google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;

// Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number
// of WINDOW_UPDATE frames received over the lifetime of the connection exceeds the value calculated
// using this formula::
//
// 1 + 2 * (inbound_streams +
// max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)
//
// the connection is terminated. The ``http2.inbound_window_update_frames_flood`` stat tracks
// the number of connections terminated due to flood mitigation. The default limit is 10.
// Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,
// but more complex implementations that try to estimate available bandwidth require at least 2.
// [#comment:TODO: implement same limits for upstream inbound frames as well.]
google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11
[(validate.rules).uint32 = {gte: 1}];

// Allows invalid HTTP messaging and headers. When this option is disabled (default), the whole
// HTTP/2 connection is terminated upon receiving an invalid HEADERS frame. When this option is
// enabled, only the offending stream is terminated.
//
// See [RFC7540, sec. 8.1](https://tools.ietf.org/html/rfc7540#section-8.1) for details.
bool stream_error_on_invalid_http_messaging = 12;
}

// [#not-implemented-hide:]
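As a sketch of how these limits compose, a hypothetical `http2_protocol_options` fragment (the values are illustrative, not recommendations) that tightens each new knob below its default:

```yaml
http2_protocol_options:
  max_outbound_frames: 5000                                # default 10000
  max_outbound_control_frames: 500                         # default 1000
  max_consecutive_inbound_frames_with_empty_payload: 1     # default 1
  max_inbound_priority_frames_per_stream: 50               # default 100
  max_inbound_window_update_frames_per_data_frame_sent: 2  # default 10
  stream_error_on_invalid_http_messaging: true             # reset the stream, keep the connection
```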
14 changes: 12 additions & 2 deletions api/envoy/api/v2/lds.proto
@@ -44,7 +44,7 @@ service ListenerDiscoveryService {
}
}

-// [#comment:next free field: 17]
+// [#comment:next free field: 18]
message Listener {
// The unique name by which this listener is known. If no name is provided,
// Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically
@@ -132,10 +132,20 @@ message Listener {
repeated listener.ListenerFilter listener_filters = 9;

// The timeout to wait for all listener filters to complete operation. If the timeout is reached,
-// the accepted socket is closed without a connection being created. Specify 0 to disable the
+// the accepted socket is closed without a connection being created unless
+// `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the
// timeout. If not specified, a default timeout of 15s is used.
google.protobuf.Duration listener_filters_timeout = 15 [(gogoproto.stdduration) = true];

// Whether a connection should be created when listener filters time out. Default is false.
//
// .. attention::
//
// Some listener filters, such as the :ref:`Proxy Protocol filter
// <config_listener_filters_proxy_protocol>`, should not be used with this option. Creating a
// connection before such a filter has completed will cause unexpected behavior.
bool continue_on_listener_filters_timeout = 17;

// Whether the listener should be set as a transparent socket.
// When this flag is set to true, connections can be redirected to the listener using an
// *iptables* *TPROXY* target, in which case the original source and destination addresses and
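A minimal listener sketch (hypothetical names and port) that opts into the new behavior; the TLS inspector is used here because, unlike the Proxy Protocol filter called out above, it is safe to combine with this option:

```yaml
- name: example_listener                  # hypothetical name
  address:
    socket_address: { address: 0.0.0.0, port_value: 15001 }
  listener_filters:
    - name: envoy.listener.tls_inspector
  listener_filters_timeout: 5s            # 5s instead of the 15s default
  # New field: create the connection anyway if the filters time out.
  continue_on_listener_filters_timeout: true
  filter_chains:
    - filters: []                         # elided for brevity
```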
2 changes: 2 additions & 0 deletions api/envoy/config/bootstrap/v2/bootstrap.proto
@@ -295,6 +295,8 @@ message RuntimeLayer {
// This follows the :ref:`runtime protobuf JSON representation encoding
// <config_runtime_proto_json>`. Unlike static xDS resources, this static
// layer is overridable by later layers in the runtime virtual filesystem.
option (validate.required) = true;

google.protobuf.Struct static_layer = 2;
DiskLayer disk_layer = 3;
AdminLayer admin_layer = 4;
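With the oneof now required, each configured layer must name exactly one layer type. A hypothetical `layered_runtime` bootstrap fragment that satisfies the constraint (the layer names and runtime key are invented for illustration):

```yaml
layered_runtime:
  layers:
    - name: static_defaults
      static_layer:                  # exactly one layer type per layer
        example.runtime_flag: true   # hypothetical runtime key
    - name: admin
      admin_layer: {}                # later layers override earlier ones
```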
3 changes: 3 additions & 0 deletions api/envoy/config/trace/v2/trace.proto
@@ -150,6 +150,9 @@ message OpenCensusConfig {

// "X-Cloud-Trace-Context:" header.
CLOUD_TRACE_CONTEXT = 3;

// X-B3-* headers.
B3 = 4;
}

// List of incoming trace context headers we will accept. First one found
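A hedged example of using the new enum value (not part of this commit): an OpenCensus tracer that accepts B3 headers on incoming requests and emits them on outgoing ones; the tracer name is assumed from Envoy's well-known names:

```yaml
tracing:
  http:
    name: envoy.tracers.opencensus   # assumed well-known tracer name
    config:
      incoming_trace_context: [B3, TRACE_CONTEXT, CLOUD_TRACE_CONTEXT]
      outgoing_trace_context: [B3, TRACE_CONTEXT]
```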
7 changes: 7 additions & 0 deletions api/xds_protocol.rst
@@ -135,6 +135,13 @@
versioning across resource types. When ADS is not used, even each
resource of a given resource type may have a distinct version, since the
Envoy API allows distinct EDS/RDS resources to point at different :ref:`ConfigSources <envoy_api_msg_core.ConfigSource>`.

Only the first request on a stream is guaranteed to carry the node identifier.
Subsequent discovery requests on the same stream may carry an empty node
identifier, regardless of whether the discovery responses on the stream have
been accepted. Whenever the node identifier does appear more than once on a
stream, it should be identical. As a result, it is sufficient to check only
the first message on the stream for the node identifier.
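As an illustrative sketch (hypothetical values, rendered as YAML for readability), two requests a management server might see on one stream:

```yaml
# First DiscoveryRequest on the stream: carries the node identifier.
node:
  id: sidecar-1          # hypothetical node id
  cluster: service-a
type_url: type.googleapis.com/envoy.api.v2.Cluster
---
# A later DiscoveryRequest on the same stream: the node may be omitted.
version_info: "1"
response_nonce: "nonce-1"
type_url: type.googleapis.com/envoy.api.v2.Cluster
```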

.. _xds_protocol_resource_update:

Resource Update
2 changes: 1 addition & 1 deletion bazel/README.md
@@ -506,7 +506,7 @@
have seen some issues with seeing the artifacts tab. If you can't see it, log out and
then log back in and it should start working.

The latest coverage report for master is available
-[here](https://s3.amazonaws.com/lyft-envoy/coverage/report-master/index.html).
+[here](https://storage.googleapis.com/envoy-coverage/report-master/index.html).

It's also possible to specialize the coverage build to a specified test or test dir. This is useful
when doing things like exploring the coverage of a fuzzer over its corpus. This can be done by
56 changes: 56 additions & 0 deletions bazel/grpc-protoinfo-1.patch
@@ -0,0 +1,56 @@
commit 49f0fb9035120d0f5b5fa49846324c0b2d59c257
Author: Marcel Hlopko <hlopko@google.com>
Date:   Thu Jun 20 18:55:56 2019 +0200

    Migrate from dep.proto to dep[ProtoInfo]

diff --git a/WORKSPACE b/WORKSPACE
index 2db3c5db2f..60582d1a0f 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -20,7 +20,7 @@ register_toolchains(
 
 git_repository(
     name = "io_bazel_rules_python",
-    commit = "8b5d0683a7d878b28fffe464779c8a53659fc645",
+    commit = "fdbb17a4118a1728d19e638a5291b4c4266ea5b8",
     remote = "https://github.com/bazelbuild/rules_python.git",
 )
 
diff --git a/bazel/generate_cc.bzl b/bazel/generate_cc.bzl
index b7edcda702..581165a190 100644
--- a/bazel/generate_cc.bzl
+++ b/bazel/generate_cc.bzl
@@ -41,11 +41,11 @@ def _join_directories(directories):
 
 def generate_cc_impl(ctx):
     """Implementation of the generate_cc rule."""
-    protos = [f for src in ctx.attr.srcs for f in src.proto.check_deps_sources.to_list()]
+    protos = [f for src in ctx.attr.srcs for f in src[ProtoInfo].check_deps_sources.to_list()]
     includes = [
         f
         for src in ctx.attr.srcs
-        for f in src.proto.transitive_imports.to_list()
+        for f in src[ProtoInfo].transitive_imports.to_list()
     ]
     outs = []
     proto_root = get_proto_root(
diff --git a/bazel/python_rules.bzl b/bazel/python_rules.bzl
index 17004f3474..3df30f8262 100644
--- a/bazel/python_rules.bzl
+++ b/bazel/python_rules.bzl
@@ -28,12 +28,12 @@ def _get_staged_proto_file(context, source_file):
 def _generate_py_impl(context):
     protos = []
     for src in context.attr.deps:
-        for file in src.proto.direct_sources:
+        for file in src[ProtoInfo].direct_sources:
             protos.append(_get_staged_proto_file(context, file))
     includes = [
         file
         for src in context.attr.deps
-        for file in src.proto.transitive_imports.to_list()
+        for file in src[ProtoInfo].transitive_imports.to_list()
     ]
     proto_root = get_proto_root(context.label.workspace_root)
     format_str = (_GENERATED_GRPC_PROTO_FORMAT if context.executable.plugin else _GENERATED_PROTO_FORMAT)
32 changes: 32 additions & 0 deletions bazel/grpc-protoinfo-2.patch
@@ -0,0 +1,32 @@
commit ecf04ccf4d8be9378166ec9e0ccf44081e211d11
Author: Marcel Hlopko <hlopko@google.com>
Date:   Thu Jun 20 18:57:33 2019 +0200

    Require ProtoInfo in attributes, not "proto"

diff --git a/bazel/generate_cc.bzl b/bazel/generate_cc.bzl
index 581165a190..87e8b9d329 100644
--- a/bazel/generate_cc.bzl
+++ b/bazel/generate_cc.bzl
@@ -146,7 +146,7 @@ _generate_cc = rule(
         "srcs": attr.label_list(
             mandatory = True,
             allow_empty = False,
-            providers = ["proto"],
+            providers = [ProtoInfo],
         ),
         "plugin": attr.label(
             executable = True,
diff --git a/bazel/python_rules.bzl b/bazel/python_rules.bzl
index 3df30f8262..d4ff77094c 100644
--- a/bazel/python_rules.bzl
+++ b/bazel/python_rules.bzl
@@ -99,7 +99,7 @@ __generate_py = rule(
         "deps": attr.label_list(
             mandatory = True,
             allow_empty = False,
-            providers = ["proto"],
+            providers = [ProtoInfo],
         ),
         "plugin": attr.label(
             executable = True,
31 changes: 31 additions & 0 deletions bazel/grpc-protoinfo-3.patch
@@ -0,0 +1,31 @@
commit e2ba3aa07009292617c3cabe734e8e44099b22ac
Author: Lukacs T. Berki <lberki@google.com>
Date:   Tue Aug 6 14:00:11 2019 +0200

    Update C++ code generation to work with Bazel 0.29.

    The above Bazel version changes proto compilation slightly: some proto
    files are put into a `_virtual_imports` directory and thus
    `_get_include_directory` needs to be updated accordingly.

    Ideally, it would use instead the `ProtoInfo` provider to tease out the
    proto import directories, but that's a bit more intrusive change.

diff --git a/bazel/protobuf.bzl b/bazel/protobuf.bzl
index f2df7bd87b..3066e1d550 100644
--- a/bazel/protobuf.bzl
+++ b/bazel/protobuf.bzl
@@ -59,6 +59,13 @@ def proto_path_to_generated_filename(proto_path, fmt_str):
 def _get_include_directory(include):
     directory = include.path
     prefix_len = 0
+
+    virtual_imports = "/_virtual_imports/"
+    if not include.is_source and virtual_imports in include.path:
+        root, relative = include.path.split(virtual_imports, 2)
+        result = root + virtual_imports + relative.split("/", 1)[0]
+        return result
+
     if not include.is_source and directory.startswith(include.root.path):
         prefix_len = len(include.root.path) + 1
 
16 changes: 15 additions & 1 deletion bazel/repositories.bzl
@@ -528,6 +528,10 @@ def _io_opencensus_cpp():
        name = "opencensus_trace",
        actual = "@io_opencensus_cpp//opencensus/trace",
    )
    native.bind(
        name = "opencensus_trace_b3",
        actual = "@io_opencensus_cpp//opencensus/trace:b3",
    )
    native.bind(
        name = "opencensus_trace_cloud_trace_context",
        actual = "@io_opencensus_cpp//opencensus/trace:cloud_trace_context",
@@ -596,7 +600,17 @@ def _com_googlesource_quiche():
    )

def _com_github_grpc_grpc():
-    _repository_impl("com_github_grpc_grpc")
+    _repository_impl(
+        "com_github_grpc_grpc",
+        patches = [
+            # Workaround for https://github.com/envoyproxy/envoy/issues/7863
+            "@envoy//bazel:grpc-protoinfo-1.patch",
+            "@envoy//bazel:grpc-protoinfo-2.patch",
+            # Pre-integration of https://github.com/grpc/grpc/pull/19860
+            "@envoy//bazel:grpc-protoinfo-3.patch",
+        ],
+        patch_args = ["-p1"],
+    )

    # Rebind some stuff to match what the gRPC Bazel is expecting.
    native.bind(
2 changes: 1 addition & 1 deletion ci/build_setup.ps1
@@ -18,4 +18,4 @@ echo "ENVOY_BAZEL_ROOT: $env:ENVOY_BAZEL_ROOT"
echo "ENVOY_SRCDIR: $env:ENVOY_SRCDIR"

$env:BAZEL_BASE_OPTIONS="--noworkspace_rc --output_base=$env:ENVOY_BAZEL_ROOT --bazelrc=$env:ENVOY_SRCDIR\windows\tools\bazel.rc"
$env:BAZEL_BUILD_OPTIONS="--strategy=Genrule=standalone --spawn_strategy=standalone --verbose_failures --jobs=$env:NUM_CPUS --show_task_finish --cache_test_results=no --test_output=all $env:BAZEL_BUILD_EXTRA_OPTIONS $env:BAZEL_EXTRA_TEST_OPTIONS"
$env:BAZEL_BUILD_OPTIONS="--features=compiler_param_file --strategy=Genrule=standalone --spawn_strategy=standalone --verbose_failures --jobs=$env:NUM_CPUS --show_task_finish --cache_test_results=no --test_output=all $env:BAZEL_BUILD_EXTRA_OPTIONS $env:BAZEL_EXTRA_TEST_OPTIONS"
2 changes: 1 addition & 1 deletion ci/build_setup.sh
@@ -93,7 +93,7 @@ if [ "$1" != "-nofetch" ]; then
fi

# This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to.
(cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f dcd3374baa9365ab7ab505018232994d6c8a8d81)
(cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f 1995c1e0eccea84bbb39f64e75ef3e9102d1ae82)
sed -e "s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example > "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE
fi

Expand Down